diff --git a/.changes/3ad44fd8-eed9-46b7-9b9f-1024774dd001.json b/.changes/3ad44fd8-eed9-46b7-9b9f-1024774dd001.json new file mode 100644 index 00000000000..3879c40e79a --- /dev/null +++ b/.changes/3ad44fd8-eed9-46b7-9b9f-1024774dd001.json @@ -0,0 +1,5 @@ +{ + "id": "3ad44fd8-eed9-46b7-9b9f-1024774dd001", + "type": "misc", + "description": "Sync AWS service models" +} \ No newline at end of file diff --git a/codegen/sdk/aws-models/accessanalyzer.json b/codegen/sdk/aws-models/accessanalyzer.json index 4c4c4c24535..4628e7d3cf0 100644 --- a/codegen/sdk/aws-models/accessanalyzer.json +++ b/codegen/sdk/aws-models/accessanalyzer.json @@ -7198,4 +7198,4 @@ } } } -} +} \ No newline at end of file diff --git a/codegen/sdk/aws-models/appflow.json b/codegen/sdk/aws-models/appflow.json index b4bcc0d2fd7..0335ccd2146 100644 --- a/codegen/sdk/aws-models/appflow.json +++ b/codegen/sdk/aws-models/appflow.json @@ -3650,7 +3650,7 @@ "min": 0, "max": 256 }, - "smithy.api#pattern": "^[\\s\\w/!@#+=,.-]*$" + "smithy.api#pattern": "^[\\s\\w/!@#+=.-]*$" } }, "com.amazonaws.appflow#EntityName": { @@ -4746,18 +4746,6 @@ "traits": { "smithy.api#documentation": "
The version of the API that's used by the connector.
" } - }, - "maxResults": { - "target": "com.amazonaws.appflow#ListEntitiesMaxResults", - "traits": { - "smithy.api#documentation": "The maximum number of items that the operation returns in the response.
" - } - }, - "nextToken": { - "target": "com.amazonaws.appflow#NextToken", - "traits": { - "smithy.api#documentation": "A token that was provided by your prior ListConnectorEntities
operation if\n the response was too big for the page size. You specify this token to get the next page of\n results in paginated response.
The response of ListConnectorEntities
lists entities grouped by category.\n This map's key represents the group name, and its value contains the list of entities\n belonging to that group.
A token that you specify in your next ListConnectorEntities
operation to get\n the next page of results in paginated response. The ListConnectorEntities
\n operation provides this token if the response is too big for the page size.
Amazon Athena is an interactive query service that lets you use standard SQL\n to analyze data directly in Amazon S3. You can point Athena at your\n data in Amazon S3 and run ad-hoc queries and get results in seconds. Athena is serverless, so there is no infrastructure to set up or manage. You pay\n only for the queries you run. Athena scales automatically—executing queries\n in parallel—so results are fast, even with large datasets and complex queries. For more\n information, see What is Amazon Athena in the Amazon Athena User\n Guide.
\nIf you connect to Athena using the JDBC driver, use version 1.1.0 of the\n driver or later with the Amazon Athena API. Earlier version drivers do not\n support the API. For more information and to download the driver, see Accessing\n Amazon Athena with JDBC.
\nFor code samples using the Amazon Web Services SDK for Java, see Examples and\n Code Samples in the Amazon Athena User\n Guide.
", + "smithy.api#documentation": "Amazon Athena is an interactive query service that lets you use standard SQL\n to analyze data directly in Amazon S3. You can point Athena at your\n data in Amazon S3 and run ad-hoc queries and get results in seconds. Athena is serverless, so there is no infrastructure to set up or manage. You pay\n only for the queries you run. Athena scales automatically—executing queries\n in parallel—so results are fast, even with large datasets and complex queries. For more\n information, see What is Amazon Athena in the Amazon Athena User\n Guide.
\nIf you connect to Athena using the JDBC driver, use version 1.1.0 of the\n driver or later with the Amazon Athena API. Earlier version drivers do not\n support the API. For more information and to download the driver, see Accessing\n Amazon Athena with JDBC.
\nFor code samples using the Amazon Web Services SDK for Java, see Examples and\n Code Samples in the Amazon Athena User\n Guide.
", "smithy.api#title": "Amazon Athena", "smithy.rules#endpointRuleSet": { "version": "1.0", "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -306,6 +306,15 @@ "ref": "Endpoint" } ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" } ], "type": "tree", @@ -2092,7 +2101,7 @@ "ErrorCategory": { "target": "com.amazonaws.athena#ErrorCategory", "traits": { - "smithy.api#documentation": "An integer value that specifies the category of a query failure error. The following\n list shows the category for each integer value.
\n\n 1 - System
\n\n 2 - User
\n\n 3 - Other
" + "smithy.api#documentation": "An integer value that specifies the category of a query failure error. The following\n list shows the category for each integer value.
\n\n 1 - System
\n\n 2 - User
\n\n 3 - Other
" } }, "ErrorType": { @@ -2471,7 +2480,7 @@ "State": { "target": "com.amazonaws.athena#CalculationExecutionState", "traits": { - "smithy.api#documentation": "The state of the calculation execution. A description of each state follows.
\n\n CREATING
- The calculation is in the process of being created.
\n CREATED
- The calculation has been created and is ready to run.
\n QUEUED
- The calculation has been queued for processing.
\n RUNNING
- The calculation is running.
\n CANCELING
- A request to cancel the calculation has been received and the\n system is working to stop it.
\n CANCELED
- The calculation is no longer running as the result of a cancel\n request.
\n COMPLETED
- The calculation has completed without error.
\n FAILED
- The calculation failed and is no longer running.
The state of the calculation execution. A description of each state follows.
\n\n CREATING
- The calculation is in the process of being created.
\n CREATED
- The calculation has been created and is ready to run.
\n QUEUED
- The calculation has been queued for processing.
\n RUNNING
- The calculation is running.
\n CANCELING
- A request to cancel the calculation has been received and the\n system is working to stop it.
\n CANCELED
- The calculation is no longer running as the result of a cancel\n request.
\n COMPLETED
- The calculation has completed without error.
\n FAILED
- The calculation failed and is no longer running.
Specifies the Lambda function or functions to use for creating the data\n catalog. This is a mapping whose values depend on the catalog type.
\nFor the HIVE
data catalog type, use the following syntax. The\n metadata-function
parameter is required. The\n sdk-version
parameter is optional and defaults to the currently\n supported version.
\n metadata-function=lambda_arn,\n sdk-version=version_number\n
\n
For the LAMBDA
data catalog type, use one of the following sets\n of required parameters, but not both.
If you have one Lambda function that processes metadata\n and another for reading the actual data, use the following syntax. Both\n parameters are required.
\n\n metadata-function=lambda_arn,\n record-function=lambda_arn\n
\n
If you have a composite Lambda function that processes\n both metadata and data, use the following syntax to specify your Lambda function.
\n\n function=lambda_arn\n
\n
The GLUE
type takes a catalog ID parameter and is required. The\n \n catalog_id\n
is the account ID of the\n Amazon Web Services account to which the Glue Data Catalog\n belongs.
\n catalog-id=catalog_id\n
\n
The GLUE
data catalog type also applies to the default\n AwsDataCatalog
that already exists in your account, of\n which you can have only one and cannot modify.
Queries that specify a Glue Data Catalog other than the default\n AwsDataCatalog
must be run on Athena engine\n version 2.
In Regions where Athena engine version 2 is not available,\n creating new Glue data catalogs results in an\n INVALID_INPUT
error.
Specifies the Lambda function or functions to use for creating the data\n catalog. This is a mapping whose values depend on the catalog type.
\nFor the HIVE
data catalog type, use the following syntax. The\n metadata-function
parameter is required. The\n sdk-version
parameter is optional and defaults to the currently\n supported version.
\n metadata-function=lambda_arn,\n sdk-version=version_number\n
\n
For the LAMBDA
data catalog type, use one of the following sets\n of required parameters, but not both.
If you have one Lambda function that processes metadata\n and another for reading the actual data, use the following syntax. Both\n parameters are required.
\n\n metadata-function=lambda_arn,\n record-function=lambda_arn\n
\n
If you have a composite Lambda function that processes\n both metadata and data, use the following syntax to specify your Lambda function.
\n\n function=lambda_arn\n
\n
The GLUE
type takes a catalog ID parameter and is required. The\n \n catalog_id\n
is the account ID of the\n Amazon Web Services account to which the Glue Data Catalog\n belongs.
\n catalog-id=catalog_id\n
\n
The GLUE
data catalog type also applies to the default\n AwsDataCatalog
that already exists in your account, of\n which you can have only one and cannot modify.
Queries that specify a Glue Data Catalog other than the default\n AwsDataCatalog
must be run on Athena engine\n version 2.
In Regions where Athena engine version 2 is not available,\n creating new Glue data catalogs results in an\n INVALID_INPUT
error.
Creates a named query in the specified workgroup. Requires that you have access to the\n workgroup.
\nFor code samples using the Amazon Web Services SDK for Java, see Examples and\n Code Samples in the Amazon Athena User\n Guide.
", + "smithy.api#documentation": "Creates a named query in the specified workgroup. Requires that you have access to the\n workgroup.
\nFor code samples using the Amazon Web Services SDK for Java, see Examples and\n Code Samples in the Amazon Athena User\n Guide.
", "smithy.api#idempotent": {} } }, @@ -2822,7 +2831,7 @@ "ClientRequestToken": { "target": "com.amazonaws.athena#IdempotencyToken", "traits": { - "smithy.api#documentation": "A unique case-sensitive string used to ensure the request to create the query is\n idempotent (executes only once). If another CreateNamedQuery
request is\n received, the same response is returned and another query is not created. If a parameter\n has changed, for example, the QueryString
, an error is returned.
This token is listed as not required because Amazon Web Services SDKs (for example\n the Amazon Web Services SDK for Java) auto-generate the token for users. If you are\n not using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide\n this token or the action will fail.
\nA unique case-sensitive string used to ensure the request to create the query is\n idempotent (executes only once). If another CreateNamedQuery
request is\n received, the same response is returned and another query is not created. If a parameter\n has changed, for example, the QueryString
, an error is returned.
This token is listed as not required because Amazon Web Services SDKs (for example\n the Amazon Web Services SDK for Java) auto-generate the token for users. If you are\n not using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide\n this token or the action will fail.
\nA unique case-sensitive string used to ensure the request to create the notebook is\n idempotent (executes only once).
\nThis token is listed as not required because Amazon Web Services SDKs (for example\n the Amazon Web Services SDK for Java) auto-generate the token for you. If you are not\n using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide\n this token or the action will fail.
\nA unique case-sensitive string used to ensure the request to create the notebook is\n idempotent (executes only once).
\nThis token is listed as not required because Amazon Web Services SDKs (for example\n the Amazon Web Services SDK for Java) auto-generate the token for you. If you are not\n using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide\n this token or the action will fail.
\nSpecifies the Lambda function or functions to use for the data catalog.\n This is a mapping whose values depend on the catalog type.
\nFor the HIVE
data catalog type, use the following syntax. The\n metadata-function
parameter is required. The\n sdk-version
parameter is optional and defaults to the currently\n supported version.
\n metadata-function=lambda_arn,\n sdk-version=version_number\n
\n
For the LAMBDA
data catalog type, use one of the following sets\n of required parameters, but not both.
If you have one Lambda function that processes metadata\n and another for reading the actual data, use the following syntax. Both\n parameters are required.
\n\n metadata-function=lambda_arn,\n record-function=lambda_arn\n
\n
If you have a composite Lambda function that processes\n both metadata and data, use the following syntax to specify your Lambda function.
\n\n function=lambda_arn\n
\n
The GLUE
type takes a catalog ID parameter and is required. The\n \n catalog_id\n
is the account ID of the\n Amazon Web Services account to which the Glue catalog\n belongs.
\n catalog-id=catalog_id\n
\n
The GLUE
data catalog type also applies to the default\n AwsDataCatalog
that already exists in your account, of\n which you can have only one and cannot modify.
Queries that specify a Glue Data Catalog other than the default\n AwsDataCatalog
must be run on Athena engine\n version 2.
Specifies the Lambda function or functions to use for the data catalog.\n This is a mapping whose values depend on the catalog type.
\nFor the HIVE
data catalog type, use the following syntax. The\n metadata-function
parameter is required. The\n sdk-version
parameter is optional and defaults to the currently\n supported version.
\n metadata-function=lambda_arn,\n sdk-version=version_number\n
\n
For the LAMBDA
data catalog type, use one of the following sets\n of required parameters, but not both.
If you have one Lambda function that processes metadata\n and another for reading the actual data, use the following syntax. Both\n parameters are required.
\n\n metadata-function=lambda_arn,\n record-function=lambda_arn\n
\n
If you have a composite Lambda function that processes\n both metadata and data, use the following syntax to specify your Lambda function.
\n\n function=lambda_arn\n
\n
The GLUE
type takes a catalog ID parameter and is required. The\n \n catalog_id\n
is the account ID of the\n Amazon Web Services account to which the Glue catalog\n belongs.
\n catalog-id=catalog_id\n
\n
The GLUE
data catalog type also applies to the default\n AwsDataCatalog
that already exists in your account, of\n which you can have only one and cannot modify.
Queries that specify a Glue Data Catalog other than the default\n AwsDataCatalog
must be run on Athena engine\n version 2.
Deletes the named query if you have access to the workgroup in which the query was\n saved.
\nFor code samples using the Amazon Web Services SDK for Java, see Examples and\n Code Samples in the Amazon Athena User\n Guide.
", + "smithy.api#documentation": "Deletes the named query if you have access to the workgroup in which the query was\n saved.
\nFor code samples using the Amazon Web Services SDK for Java, see Examples and\n Code Samples in the Amazon Athena User\n Guide.
", "smithy.api#idempotent": {} } }, @@ -3459,7 +3468,7 @@ "EncryptionOption": { "target": "com.amazonaws.athena#EncryptionOption", "traits": { - "smithy.api#documentation": "Indicates whether Amazon S3 server-side encryption with Amazon S3-managed keys (SSE_S3
), server-side encryption with KMS-managed keys\n (SSE_KMS
), or client-side encryption with KMS-managed keys\n (CSE_KMS
) is used.
If a query runs in a workgroup and the workgroup overrides client-side settings, then\n the workgroup's setting for encryption is used. It specifies whether query results must\n be encrypted, for all queries that run in this workgroup.
", + "smithy.api#documentation": "Indicates whether Amazon S3 server-side encryption with Amazon S3-managed keys (SSE_S3
), server-side encryption with KMS-managed keys\n (SSE_KMS
), or client-side encryption with KMS-managed keys\n (CSE_KMS
) is used.
If a query runs in a workgroup and the workgroup overrides client-side settings, then\n the workgroup's setting for encryption is used. It specifies whether query results must\n be encrypted, for all queries that run in this workgroup.
", "smithy.api#required": {} } }, @@ -3721,7 +3730,7 @@ "ExecutorState": { "target": "com.amazonaws.athena#ExecutorState", "traits": { - "smithy.api#documentation": "The processing state of the executor. A description of each state follows.
\n\n CREATING
- The executor is being started, including acquiring\n resources.
\n CREATED
- The executor has been started.
\n REGISTERED
- The executor has been registered.
\n TERMINATING
- The executor is in the process of shutting down.
\n TERMINATED
- The executor is no longer running.
\n FAILED
- Due to a failure, the executor is no longer running.
The processing state of the executor. A description of each state follows.
\n\n CREATING
- The executor is being started, including acquiring\n resources.
\n CREATED
- The executor has been started.
\n REGISTERED
- The executor has been registered.
\n TERMINATING
- The executor is in the process of shutting down.
\n TERMINATED
- The executor is no longer running.
\n FAILED
- Due to a failure, the executor is no longer running.
Streams the results of a single query execution specified by\n QueryExecutionId
from the Athena query results location in\n Amazon S3. For more information, see Query Results in the Amazon Athena User Guide. This request does not execute the query\n but returns results. Use StartQueryExecution to run a query.
To stream query results successfully, the IAM principal with permission to call\n GetQueryResults
also must have permissions to the Amazon S3\n GetObject
action for the Athena query results location.
IAM principals with permission to the Amazon S3\n GetObject
action for the query results location are able to retrieve\n query results from Amazon S3 even if permission to the\n GetQueryResults
action is denied. To restrict user or role access,\n ensure that Amazon S3 permissions to the Athena query location\n are denied.
Streams the results of a single query execution specified by\n QueryExecutionId
from the Athena query results location in\n Amazon S3. For more information, see Query Results in the Amazon Athena User Guide. This request does not execute the query\n but returns results. Use StartQueryExecution to run a query.
To stream query results successfully, the IAM principal with permission to call\n GetQueryResults
also must have permissions to the Amazon S3\n GetObject
action for the Athena query results location.
IAM principals with permission to the Amazon S3\n GetObject
action for the query results location are able to retrieve\n query results from Amazon S3 even if permission to the\n GetQueryResults
action is denied. To restrict user or role access,\n ensure that Amazon S3 permissions to the Athena query location\n are denied.
Returns query execution runtime statistics related to a single execution of a query if\n you have access to the workgroup in which the query ran. Query execution runtime\n statistics are returned only when QueryExecutionStatus$State is in a\n SUCCEEDED or FAILED state. Stage-level input and output row count and data size\n statistics are not shown when a query has row-level filters defined in Lake\n Formation.
" + "smithy.api#documentation": "Returns query execution runtime statistics related to a single execution of a query if\n you have access to the workgroup in which the query ran. The query execution runtime\n statistics is returned only when QueryExecutionStatus$State is in a\n SUCCEEDED or FAILED state.
" } }, "com.amazonaws.athena#GetQueryRuntimeStatisticsInput": { @@ -4710,7 +4716,7 @@ "ClientRequestToken": { "target": "com.amazonaws.athena#ClientRequestToken", "traits": { - "smithy.api#documentation": "A unique case-sensitive string used to ensure the request to import the notebook is\n idempotent (executes only once).
\nThis token is listed as not required because Amazon Web Services SDKs (for example\n the Amazon Web Services SDK for Java) auto-generate the token for you. If you are not\n using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide\n this token or the action will fail.
\nA unique case-sensitive string used to ensure the request to import the notebook is\n idempotent (executes only once).
\nThis token is listed as not required because Amazon Web Services SDKs (for example\n the Amazon Web Services SDK for Java) auto-generate the token for you. If you are not\n using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide\n this token or the action will fail.
\nA filter for a specific calculation execution state. A description of each state\n follows.
\n\n CREATING
- The calculation is in the process of being created.
\n CREATED
- The calculation has been created and is ready to run.
\n QUEUED
- The calculation has been queued for processing.
\n RUNNING
- The calculation is running.
\n CANCELING
- A request to cancel the calculation has been received and the\n system is working to stop it.
\n CANCELED
- The calculation is no longer running as the result of a cancel\n request.
\n COMPLETED
- The calculation has completed without error.
\n FAILED
- The calculation failed and is no longer running.
A filter for a specific calculation execution state. A description of each state\n follows.
\n\n CREATING
- The calculation is in the process of being created.
\n CREATED
- The calculation has been created and is ready to run.
\n QUEUED
- The calculation has been queued for processing.
\n RUNNING
- The calculation is running.
\n CANCELING
- A request to cancel the calculation has been received and the\n system is working to stop it.
\n CANCELED
- The calculation is no longer running as the result of a cancel\n request.
\n COMPLETED
- The calculation has completed without error.
\n FAILED
- The calculation failed and is no longer running.
A filter for a specific executor state. A description of each state follows.
\n\n CREATING
- The executor is being started, including acquiring\n resources.
\n CREATED
- The executor has been started.
\n REGISTERED
- The executor has been registered.
\n TERMINATING
- The executor is in the process of shutting down.
\n TERMINATED
- The executor is no longer running.
\n FAILED
- Due to a failure, the executor is no longer running.
A filter for a specific executor state. A description of each state follows.
\n\n CREATING
- The executor is being started, including acquiring\n resources.
\n CREATED
- The executor has been started.
\n REGISTERED
- The executor has been registered.
\n TERMINATING
- The executor is in the process of shutting down.
\n TERMINATED
- The executor is no longer running.
\n FAILED
- Due to a failure, the executor is no longer running.
Provides a list of available query IDs only for queries saved in the specified\n workgroup. Requires that you have access to the specified workgroup. If a workgroup is\n not specified, lists the saved queries for the primary workgroup.
\nFor code samples using the Amazon Web Services SDK for Java, see Examples and\n Code Samples in the Amazon Athena User\n Guide.
", + "smithy.api#documentation": "Provides a list of available query IDs only for queries saved in the specified\n workgroup. Requires that you have access to the specified workgroup. If a workgroup is\n not specified, lists the saved queries for the primary workgroup.
\nFor code samples using the Amazon Web Services SDK for Java, see Examples and\n Code Samples in the Amazon Athena User\n Guide.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -5470,7 +5476,7 @@ } ], "traits": { - "smithy.api#documentation": "Provides a list of available query execution IDs for the queries in the specified\n workgroup. If a workgroup is not specified, returns a list of query execution IDs for\n the primary workgroup. Requires you to have access to the workgroup in which the queries\n ran.
\nFor code samples using the Amazon Web Services SDK for Java, see Examples and\n Code Samples in the Amazon Athena User\n Guide.
", + "smithy.api#documentation": "Provides a list of available query execution IDs for the queries in the specified\n workgroup. If a workgroup is not specified, returns a list of query execution IDs for\n the primary workgroup. Requires you to have access to the workgroup in which the queries\n ran.
\nFor code samples using the Amazon Web Services SDK for Java, see Examples and\n Code Samples in the Amazon Athena User\n Guide.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -5559,7 +5565,7 @@ "StateFilter": { "target": "com.amazonaws.athena#SessionState", "traits": { - "smithy.api#documentation": "A filter for a specific session state. A description of each state follows.
\n\n CREATING
- The session is being started, including acquiring\n resources.
\n CREATED
- The session has been started.
\n IDLE
- The session is able to accept a calculation.
\n BUSY
- The session is processing another task and is unable to accept a\n calculation.
\n TERMINATING
- The session is in the process of shutting down.
\n TERMINATED
- The session and its resources are no longer running.
\n DEGRADED
- The session has no healthy coordinators.
\n FAILED
- Due to a failure, the session and its resources are no longer\n running.
A filter for a specific session state. A description of each state follows.
\n\n CREATING
- The session is being started, including acquiring\n resources.
\n CREATED
- The session has been started.
\n IDLE
- The session is able to accept a calculation.
\n BUSY
- The session is processing another task and is unable to accept a\n calculation.
\n TERMINATING
- The session is in the process of shutting down.
\n TERMINATED
- The session and its resources are no longer running.
\n DEGRADED
- The session has no healthy coordinators.
\n FAILED
- Due to a failure, the session and its resources are no longer\n running.
The state of query execution. QUEUED
indicates that the query has been\n submitted to the service, and Athena will execute the query as soon as\n resources are available. RUNNING
indicates that the query is in execution\n phase. SUCCEEDED
indicates that the query completed without errors.\n FAILED
indicates that the query experienced an error and did not\n complete processing. CANCELLED
indicates that a user input interrupted\n query execution.
Athena automatically retries your queries in cases of certain\n transient errors. As a result, you may see the query state transition from\n RUNNING
or FAILED
to QUEUED
.
The state of query execution. QUEUED
indicates that the query has been\n submitted to the service, and Athena will execute the query as soon as\n resources are available. RUNNING
indicates that the query is in execution\n phase. SUCCEEDED
indicates that the query completed without errors.\n FAILED
indicates that the query experienced an error and did not\n complete processing. CANCELLED
indicates that a user input interrupted\n query execution.
Athena automatically retries your queries in cases of certain\n transient errors. As a result, you may see the query state transition from\n RUNNING
or FAILED
to QUEUED
.
The Amazon Web Services account ID that you expect to be the owner of the Amazon S3 bucket specified by ResultConfiguration$OutputLocation.\n If set, Athena uses the value for ExpectedBucketOwner
when it\n makes Amazon S3 calls to your specified output location. If the\n ExpectedBucketOwner
\n Amazon Web Services account ID does not match the actual owner of the Amazon S3\n bucket, the call fails with a permissions error.
This is a client-side setting. If workgroup settings override client-side settings,\n then the query uses the ExpectedBucketOwner
setting that is specified for\n the workgroup, and also uses the location for storing query results specified in the\n workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration\n and Workgroup Settings Override Client-Side Settings.
The Amazon Web Services account ID that you expect to be the owner of the Amazon S3 bucket specified by ResultConfiguration$OutputLocation.\n If set, Athena uses the value for ExpectedBucketOwner
when it\n makes Amazon S3 calls to your specified output location. If the\n ExpectedBucketOwner
\n Amazon Web Services account ID does not match the actual owner of the Amazon S3\n bucket, the call fails with a permissions error.
This is a client-side setting. If workgroup settings override client-side settings,\n then the query uses the ExpectedBucketOwner
setting that is specified for\n the workgroup, and also uses the location for storing query results specified in the\n workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration\n and Workgroup Settings Override Client-Side Settings.
The Amazon Web Services account ID that you expect to be the owner of the Amazon S3 bucket specified by ResultConfiguration$OutputLocation.\n If set, Athena uses the value for ExpectedBucketOwner
when it\n makes Amazon S3 calls to your specified output location. If the\n ExpectedBucketOwner
\n Amazon Web Services account ID does not match the actual owner of the Amazon S3\n bucket, the call fails with a permissions error.
If workgroup settings override client-side settings, then the query uses the\n ExpectedBucketOwner
setting that is specified for the workgroup, and\n also uses the location for storing query results specified in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings.
The Amazon Web Services account ID that you expect to be the owner of the Amazon S3 bucket specified by ResultConfiguration$OutputLocation.\n If set, Athena uses the value for ExpectedBucketOwner
when it\n makes Amazon S3 calls to your specified output location. If the\n ExpectedBucketOwner
\n Amazon Web Services account ID does not match the actual owner of the Amazon S3\n bucket, the call fails with a permissions error.
If workgroup settings override client-side settings, then the query uses the\n ExpectedBucketOwner
setting that is specified for the workgroup, and\n also uses the location for storing query results specified in the workgroup. See WorkGroupConfiguration$EnforceWorkGroupConfiguration and Workgroup Settings Override Client-Side Settings.
The state of the session. A description of each state follows.
\n\n CREATING
- The session is being started, including acquiring\n resources.
\n CREATED
- The session has been started.
\n IDLE
- The session is able to accept a calculation.
\n BUSY
- The session is processing another task and is unable to accept a\n calculation.
\n TERMINATING
- The session is in the process of shutting down.
\n TERMINATED
- The session and its resources are no longer running.
\n DEGRADED
- The session has no healthy coordinators.
\n FAILED
- Due to a failure, the session and its resources are no longer\n running.
The state of the session. A description of each state follows.
\n\n CREATING
- The session is being started, including acquiring\n resources.
\n CREATED
- The session has been started.
\n IDLE
- The session is able to accept a calculation.
\n BUSY
- The session is processing another task and is unable to accept a\n calculation.
\n TERMINATING
- The session is in the process of shutting down.
\n TERMINATED
- The session and its resources are no longer running.
\n DEGRADED
- The session has no healthy coordinators.
\n FAILED
- Due to a failure, the session and its resources are no longer\n running.
A unique case-sensitive string used to ensure the request to create the calculation is\n idempotent (executes only once). If another\n StartCalculationExecutionRequest
is received, the same response is\n returned and another calculation is not created. If a parameter has changed, an error is\n returned.
This token is listed as not required because Amazon Web Services SDKs (for example\n the Amazon Web Services SDK for Java) auto-generate the token for users. If you are\n not using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide\n this token or the action will fail.
\nA unique case-sensitive string used to ensure the request to create the calculation is\n idempotent (executes only once). If another\n StartCalculationExecutionRequest
is received, the same response is\n returned and another calculation is not created. If a parameter has changed, an error is\n returned.
This token is listed as not required because Amazon Web Services SDKs (for example\n the Amazon Web Services SDK for Java) auto-generate the token for users. If you are\n not using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide\n this token or the action will fail.
\n\n CREATING
- The calculation is in the process of being created.
\n CREATED
- The calculation has been created and is ready to run.
\n QUEUED
- The calculation has been queued for processing.
\n RUNNING
- The calculation is running.
\n CANCELING
- A request to cancel the calculation has been received and the\n system is working to stop it.
\n CANCELED
- The calculation is no longer running as the result of a cancel\n request.
\n COMPLETED
- The calculation has completed without error.
\n FAILED
- The calculation failed and is no longer running.
\n CREATING
- The calculation is in the process of being created.
\n CREATED
- The calculation has been created and is ready to run.
\n QUEUED
- The calculation has been queued for processing.
\n RUNNING
- The calculation is running.
\n CANCELING
- A request to cancel the calculation has been received and the\n system is working to stop it.
\n CANCELED
- The calculation is no longer running as the result of a cancel\n request.
\n COMPLETED
- The calculation has completed without error.
\n FAILED
- The calculation failed and is no longer running.
A unique case-sensitive string used to ensure the request to create the query is\n idempotent (executes only once). If another StartQueryExecution
request is\n received, the same response is returned and another query is not created. If a parameter\n has changed, for example, the QueryString
, an error is returned.
This token is listed as not required because Amazon Web Services SDKs (for example\n the Amazon Web Services SDK for Java) auto-generate the token for users. If you are\n not using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide\n this token or the action will fail.
\nA unique case-sensitive string used to ensure the request to create the query is\n idempotent (executes only once). If another StartQueryExecution
request is\n received, the same response is returned and another query is not created. If a parameter\n has changed, for example, the QueryString
, an error is returned.
This token is listed as not required because Amazon Web Services SDKs (for example\n the Amazon Web Services SDK for Java) auto-generate the token for users. If you are\n not using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide\n this token or the action will fail.
\nA unique case-sensitive string used to ensure the request to create the session is\n idempotent (executes only once). If another StartSessionRequest
is\n received, the same response is returned and another session is not created. If a\n parameter has changed, an error is returned.
This token is listed as not required because Amazon Web Services SDKs (for example\n the Amazon Web Services SDK for Java) auto-generate the token for users. If you are\n not using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide\n this token or the action will fail.
\nA unique case-sensitive string used to ensure the request to create the session is\n idempotent (executes only once). If another StartSessionRequest
is\n received, the same response is returned and another session is not created. If a\n parameter has changed, an error is returned.
This token is listed as not required because Amazon Web Services SDKs (for example\n the Amazon Web Services SDK for Java) auto-generate the token for users. If you are\n not using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide\n this token or the action will fail.
\nThe state of the session. A description of each state follows.
\n\n CREATING
- The session is being started, including acquiring\n resources.
\n CREATED
- The session has been started.
\n IDLE
- The session is able to accept a calculation.
\n BUSY
- The session is processing another task and is unable to accept a\n calculation.
\n TERMINATING
- The session is in the process of shutting down.
\n TERMINATED
- The session and its resources are no longer running.
\n DEGRADED
- The session has no healthy coordinators.
\n FAILED
- Due to a failure, the session and its resources are no longer\n running.
The state of the session. A description of each state follows.
\n\n CREATING
- The session is being started, including acquiring\n resources.
\n CREATED
- The session has been started.
\n IDLE
- The session is able to accept a calculation.
\n BUSY
- The session is processing another task and is unable to accept a\n calculation.
\n TERMINATING
- The session is in the process of shutting down.
\n TERMINATED
- The session and its resources are no longer running.
\n DEGRADED
- The session has no healthy coordinators.
\n FAILED
- Due to a failure, the session and its resources are no longer\n running.
Requests the cancellation of a calculation. A StopCalculationExecution
\n call on a calculation that is already in a terminal state (for example,\n STOPPED
, FAILED
, or COMPLETED
) succeeds but\n has no effect.
Cancelling a calculation is done on a best effort basis. If a calculation cannot\n be cancelled, you can be charged for its completion. If you are concerned about\n being charged for a calculation that cannot be cancelled, consider terminating the\n session in which the calculation is running.
\nRequests the cancellation of a calculation. A StopCalculationExecution
\n call on a calculation that is already in a terminal state (for example,\n STOPPED
, FAILED
, or COMPLETED
) succeeds but\n has no effect.
Cancelling a calculation is done on a best effort basis. If a calculation cannot\n be cancelled, you can be charged for its completion. If you are concerned about\n being charged for a calculation that cannot be cancelled, consider terminating the\n session in which the calculation is running.
\n\n CREATING
- The calculation is in the process of being created.
\n CREATED
- The calculation has been created and is ready to run.
\n QUEUED
- The calculation has been queued for processing.
\n RUNNING
- The calculation is running.
\n CANCELING
- A request to cancel the calculation has been received and the\n system is working to stop it.
\n CANCELED
- The calculation is no longer running as the result of a cancel\n request.
\n COMPLETED
- The calculation has completed without error.
\n FAILED
- The calculation failed and is no longer running.
\n CREATING
- The calculation is in the process of being created.
\n CREATED
- The calculation has been created and is ready to run.
\n QUEUED
- The calculation has been queued for processing.
\n RUNNING
- The calculation is running.
\n CANCELING
- A request to cancel the calculation has been received and the\n system is working to stop it.
\n CANCELED
- The calculation is no longer running as the result of a cancel\n request.
\n COMPLETED
- The calculation has completed without error.
\n FAILED
- The calculation failed and is no longer running.
Stops a query execution. Requires you to have access to the workgroup in which the\n query ran.
\nFor code samples using the Amazon Web Services SDK for Java, see Examples and\n Code Samples in the Amazon Athena User\n Guide.
", + "smithy.api#documentation": "Stops a query execution. Requires you to have access to the workgroup in which the\n query ran.
\nFor code samples using the Amazon Web Services SDK for Java, see Examples and\n Code Samples in the Amazon Athena User\n Guide.
", "smithy.api#idempotent": {} } }, @@ -7798,7 +7801,7 @@ "State": { "target": "com.amazonaws.athena#SessionState", "traits": { - "smithy.api#documentation": "The state of the session. A description of each state follows.
\n\n CREATING
- The session is being started, including acquiring\n resources.
\n CREATED
- The session has been started.
\n IDLE
- The session is able to accept a calculation.
\n BUSY
- The session is processing another task and is unable to accept a\n calculation.
\n TERMINATING
- The session is in the process of shutting down.
\n TERMINATED
- The session and its resources are no longer running.
\n DEGRADED
- The session has no healthy coordinators.
\n FAILED
- Due to a failure, the session and its resources are no longer\n running.
The state of the session. A description of each state follows.
\n\n CREATING
- The session is being started, including acquiring\n resources.
\n CREATED
- The session has been started.
\n IDLE
- The session is able to accept a calculation.
\n BUSY
- The session is processing another task and is unable to accept a\n calculation.
\n TERMINATING
- The session is in the process of shutting down.
\n TERMINATED
- The session and its resources are no longer running.
\n DEGRADED
- The session has no healthy coordinators.
\n FAILED
- Due to a failure, the session and its resources are no longer\n running.
The error message containing the reason why the prepared statement could not be\n returned. The following error messages are possible:
\n\n INVALID_INPUT
- The name of the prepared statement that was\n provided is not valid (for example, the name is too long).
\n STATEMENT_NOT_FOUND
- A prepared statement with the name provided\n could not be found.
\n UNAUTHORIZED
- The requester does not have permission to access\n the workgroup that contains the prepared statement.
The error message containing the reason why the prepared statement could not be\n returned. The following error messages are possible:
\n\n INVALID_INPUT
- The name of the prepared statement that was\n provided is not valid (for example, the name is too long).
\n STATEMENT_NOT_FOUND
- A prepared statement with the name provided\n could not be found.
\n UNAUTHORIZED
- The requester does not have permission to access\n the workgroup that contains the prepared statement.
Specifies the Lambda function or functions to use for updating the data\n catalog. This is a mapping whose values depend on the catalog type.
\nFor the HIVE
data catalog type, use the following syntax. The\n metadata-function
parameter is required. The\n sdk-version
parameter is optional and defaults to the currently\n supported version.
\n metadata-function=lambda_arn,\n sdk-version=version_number\n
\n
For the LAMBDA
data catalog type, use one of the following sets\n of required parameters, but not both.
If you have one Lambda function that processes metadata\n and another for reading the actual data, use the following syntax. Both\n parameters are required.
\n\n metadata-function=lambda_arn,\n record-function=lambda_arn\n
\n
If you have a composite Lambda function that processes\n both metadata and data, use the following syntax to specify your Lambda function.
\n\n function=lambda_arn\n
\n
Specifies the Lambda function or functions to use for updating the data\n catalog. This is a mapping whose values depend on the catalog type.
\nFor the HIVE
data catalog type, use the following syntax. The\n metadata-function
parameter is required. The\n sdk-version
parameter is optional and defaults to the currently\n supported version.
\n metadata-function=lambda_arn,\n sdk-version=version_number\n
\n
For the LAMBDA
data catalog type, use one of the following sets\n of required parameters, but not both.
If you have one Lambda function that processes metadata\n and another for reading the actual data, use the following syntax. Both\n parameters are required.
\n\n metadata-function=lambda_arn,\n record-function=lambda_arn\n
\n
If you have a composite Lambda function that processes\n both metadata and data, use the following syntax to specify your Lambda function.
\n\n function=lambda_arn\n
\n
The updated content for the notebook.
", - "smithy.api#required": {} + "smithy.api#documentation": "The updated content for the notebook.
" } }, "Type": { "target": "com.amazonaws.athena#NotebookType", "traits": { - "smithy.api#documentation": "The notebook content type. Currently, the only valid type is\n IPYNB
.
The notebook content type. Currently, the only valid type is\n IPYNB
.
A unique case-sensitive string used to ensure the request to create the notebook is\n idempotent (executes only once).
\nThis token is listed as not required because Amazon Web Services SDKs (for example\n the Amazon Web Services SDK for Java) auto-generate the token for you. If you are not\n using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide\n this token or the action will fail.
\nA unique case-sensitive string used to ensure the request to create the notebook is\n idempotent (executes only once).
\nThis token is listed as not required because Amazon Web Services SDKs (for example\n the Amazon Web Services SDK for Java) auto-generate the token for you. If you are not\n using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide\n this token or the action will fail.
\nA unique case-sensitive string used to ensure the request to create the notebook is\n idempotent (executes only once).
\nThis token is listed as not required because Amazon Web Services SDKs (for example\n the Amazon Web Services SDK for Java) auto-generate the token for you. If you are not\n using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide\n this token or the action will fail.
\nA unique case-sensitive string used to ensure the request to create the notebook is\n idempotent (executes only once).
\nThis token is listed as not required because Amazon Web Services SDKs (for example\n the Amazon Web Services SDK for Java) auto-generate the token for you. If you are not\n using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide\n this token or the action will fail.
\nThe average upload rate limit component of the bandwidth rate limit \n interval, in bits per second. This field does not appear in the response if \n the upload rate limit is not set.
\nFor Backup Gateway, the minimum value is (Value)
.
The hour of the day to start the bandwidth rate limit interval.
", - "smithy.api#required": {} - } - }, - "EndHourOfDay": { - "target": "com.amazonaws.backupgateway#HourOfDay", - "traits": { - "smithy.api#documentation": "The hour of the day to end the bandwidth rate limit interval.
", - "smithy.api#required": {} - } - }, - "StartMinuteOfHour": { - "target": "com.amazonaws.backupgateway#MinuteOfHour", - "traits": { - "smithy.api#documentation": "The minute of the hour to start the bandwidth rate limit interval. The \n interval begins at the start of that minute. To begin an interval exactly at \n the start of the hour, use the value 0
.
The minute of the hour to end the bandwidth rate limit interval.
\nThe bandwidth rate limit interval ends at the end of the minute. \n To end an interval at the end of an hour, use the value 59
.
The days of the week component of the bandwidth rate limit interval, \n represented as ordinal numbers from 0 to 6, where 0 represents Sunday and 6 represents \n Saturday.
", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "Describes a bandwidth rate limit interval for a gateway. A bandwidth \n rate limit schedule consists of one or more bandwidth rate limit intervals. \n A bandwidth rate limit interval defines a period of time on one or more days \n of the week, during which bandwidth rate limits are specified for uploading, \n downloading, or both.
" - } - }, - "com.amazonaws.backupgateway#BandwidthRateLimitIntervals": { - "type": "list", - "member": { - "target": "com.amazonaws.backupgateway#BandwidthRateLimitInterval" - }, - "traits": { - "smithy.api#length": { - "min": 0, - "max": 20 - } - } - }, - "com.amazonaws.backupgateway#BandwidthRateLimitScheduleResource": { - "type": "resource", - "identifiers": { - "GatewayArn": { - "target": "com.amazonaws.backupgateway#GatewayArn" - } - }, - "put": { - "target": "com.amazonaws.backupgateway#PutBandwidthRateLimitSchedule" - }, - "read": { - "target": "com.amazonaws.backupgateway#GetBandwidthRateLimitSchedule" - } - }, "com.amazonaws.backupgateway#ConflictException": { "type": "structure", "members": { @@ -1792,6 +1717,14 @@ "output": { "target": "com.amazonaws.backupgateway#CreateGatewayOutput" }, + "errors": [ + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" + } + ], "traits": { "smithy.api#documentation": "Creates a backup gateway. After you create a gateway, you can associate it with a server\n using the AssociateGatewayToServer
operation.
Retrieves the bandwidth rate limit schedule for a specified gateway. \n By default, gateways do not have bandwidth rate limit schedules, which means \n no bandwidth rate limiting is in effect. Use this to get a gateway's \n bandwidth rate limit schedule.
", + "smithy.api#documentation": "By providing the ARN (Amazon Resource Name), this\n API returns the gateway.
", "smithy.api#readonly": {} } }, - "com.amazonaws.backupgateway#GetBandwidthRateLimitScheduleInput": { + "com.amazonaws.backupgateway#GetGatewayInput": { "type": "structure", "members": { "GatewayArn": { "target": "com.amazonaws.backupgateway#GatewayArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the gateway. Use the \n \n ListGateways
\n operation to return a list of gateways \n for your account and Amazon Web Services Region.
The Amazon Resource Name (ARN) of the gateway.
", "smithy.api#required": {} } } } }, - "com.amazonaws.backupgateway#GetBandwidthRateLimitScheduleOutput": { + "com.amazonaws.backupgateway#GetGatewayOutput": { "type": "structure", "members": { - "GatewayArn": { - "target": "com.amazonaws.backupgateway#GatewayArn", - "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the gateway. Use the \n \n ListGateways
\n operation to return a list of gateways \n for your account and Amazon Web Services Region.
An array containing bandwidth rate limit schedule intervals for a gateway. \n When no bandwidth rate limit intervals have been scheduled, the array is empty.
" + "smithy.api#documentation": "By providing the ARN (Amazon Resource Name), this\n API returns the gateway.
" } } } }, - "com.amazonaws.backupgateway#GetGateway": { + "com.amazonaws.backupgateway#GetVirtualMachine": { "type": "operation", "input": { - "target": "com.amazonaws.backupgateway#GetGatewayInput" + "target": "com.amazonaws.backupgateway#GetVirtualMachineInput" }, "output": { - "target": "com.amazonaws.backupgateway#GetGatewayOutput" + "target": "com.amazonaws.backupgateway#GetVirtualMachineOutput" }, "errors": [ + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, { "target": "com.amazonaws.backupgateway#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "By providing the ARN (Amazon Resource Name), this\n API returns the gateway.
", - "smithy.api#readonly": {} - } - }, - "com.amazonaws.backupgateway#GetGatewayInput": { - "type": "structure", - "members": { - "GatewayArn": { - "target": "com.amazonaws.backupgateway#GatewayArn", - "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the gateway.
", - "smithy.api#required": {} - } - } - } - }, - "com.amazonaws.backupgateway#GetGatewayOutput": { - "type": "structure", - "members": { - "Gateway": { - "target": "com.amazonaws.backupgateway#GatewayDetails", - "traits": { - "smithy.api#documentation": "By providing the ARN (Amazon Resource Name), this\n API returns the gateway.
" - } - } - } - }, - "com.amazonaws.backupgateway#GetHypervisor": { - "type": "operation", - "input": { - "target": "com.amazonaws.backupgateway#GetHypervisorInput" - }, - "output": { - "target": "com.amazonaws.backupgateway#GetHypervisorOutput" - }, - "errors": [ - { - "target": "com.amazonaws.backupgateway#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "This action requests information about the specified hypervisor to which the gateway will connect. \n A hypervisor is hardware, software, or firmware that creates and manages virtual machines, \n and allocates resources to them.
", - "smithy.api#readonly": {} - } - }, - "com.amazonaws.backupgateway#GetHypervisorInput": { - "type": "structure", - "members": { - "HypervisorArn": { - "target": "com.amazonaws.backupgateway#ServerArn", - "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the hypervisor.
", - "smithy.api#required": {} - } - } - } - }, - "com.amazonaws.backupgateway#GetHypervisorOutput": { - "type": "structure", - "members": { - "Hypervisor": { - "target": "com.amazonaws.backupgateway#HypervisorDetails", - "traits": { - "smithy.api#documentation": "Details about the requested hypervisor.
" - } - } - } - }, - "com.amazonaws.backupgateway#GetHypervisorPropertyMappings": { - "type": "operation", - "input": { - "target": "com.amazonaws.backupgateway#GetHypervisorPropertyMappingsInput" - }, - "output": { - "target": "com.amazonaws.backupgateway#GetHypervisorPropertyMappingsOutput" - }, - "errors": [ - { - "target": "com.amazonaws.backupgateway#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "This action retrieves the property mappings for the specified hypervisor. \n A hypervisor property mapping displays the relationship of entity properties \n available from the on-premises hypervisor to the properties available in Amazon Web Services.
", - "smithy.api#readonly": {} - } - }, - "com.amazonaws.backupgateway#GetHypervisorPropertyMappingsInput": { - "type": "structure", - "members": { - "HypervisorArn": { - "target": "com.amazonaws.backupgateway#ServerArn", - "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the hypervisor.
", - "smithy.api#required": {} - } - } - } - }, - "com.amazonaws.backupgateway#GetHypervisorPropertyMappingsOutput": { - "type": "structure", - "members": { - "HypervisorArn": { - "target": "com.amazonaws.backupgateway#ServerArn", - "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the hypervisor.
" - } - }, - "VmwareToAwsTagMappings": { - "target": "com.amazonaws.backupgateway#VmwareToAwsTagMappings", - "traits": { - "smithy.api#documentation": "This is a display of the mappings of on-premises VMware tags to the \n Amazon Web Services tags.
" - } }, - "IamRoleArn": { - "target": "com.amazonaws.backupgateway#IamRoleArn", - "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role.
" - } - } - } - }, - "com.amazonaws.backupgateway#GetVirtualMachine": { - "type": "operation", - "input": { - "target": "com.amazonaws.backupgateway#GetVirtualMachineInput" - }, - "output": { - "target": "com.amazonaws.backupgateway#GetVirtualMachineOutput" - }, - "errors": [ { - "target": "com.amazonaws.backupgateway#ResourceNotFoundException" + "target": "com.amazonaws.backupgateway#ValidationException" } ], "traits": { @@ -2451,68 +2253,6 @@ "smithy.api#documentation": "Represents the hypervisor's permissions to which the gateway will connect.
\nA hypervisor is hardware, software, or firmware that creates and manages virtual machines,\n and allocates resources to them.
" } }, - "com.amazonaws.backupgateway#HypervisorDetails": { - "type": "structure", - "members": { - "Host": { - "target": "com.amazonaws.backupgateway#Host", - "traits": { - "smithy.api#documentation": "The server host of the hypervisor. This can be either an IP address or \n a fully-qualified domain name (FQDN).
" - } - }, - "HypervisorArn": { - "target": "com.amazonaws.backupgateway#ServerArn", - "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the hypervisor.
" - } - }, - "KmsKeyArn": { - "target": "com.amazonaws.backupgateway#KmsKeyArn", - "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the KMS \n used to encrypt the hypervisor.
" - } - }, - "Name": { - "target": "com.amazonaws.backupgateway#Name", - "traits": { - "smithy.api#documentation": "This is the name of the specified hypervisor.
" - } - }, - "LogGroupArn": { - "target": "com.amazonaws.backupgateway#LogGroupArn", - "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the group of gateways within \n the requested log.
" - } - }, - "State": { - "target": "com.amazonaws.backupgateway#HypervisorState", - "traits": { - "smithy.api#documentation": "This is the current state of the specified hypervisor.
\nThe possible states are PENDING
, ONLINE
, \n OFFLINE
, or ERROR
.
This is the time when the most recent successful sync \n of metadata occurred.
" - } - }, - "LatestMetadataSyncStatusMessage": { - "target": "com.amazonaws.backupgateway#string", - "traits": { - "smithy.api#documentation": "This is the most recent status for the indicated metadata sync.
" - } - }, - "LatestMetadataSyncStatus": { - "target": "com.amazonaws.backupgateway#SyncMetadataStatus", - "traits": { - "smithy.api#documentation": "This is the most recent status for the indicated metadata sync.
" - } - } - }, - "traits": { - "smithy.api#documentation": "These are the details of the specified hypervisor. A hypervisor is hardware, \n software, or firmware that creates and manages virtual machines, and allocates \n resources to them.
" - } - }, "com.amazonaws.backupgateway#HypervisorId": { "type": "string", "traits": { @@ -2522,20 +2262,6 @@ } } }, - "com.amazonaws.backupgateway#HypervisorPropertyMappingResource": { - "type": "resource", - "identifiers": { - "HypervisorArn": { - "target": "com.amazonaws.backupgateway#ServerArn" - } - }, - "put": { - "target": "com.amazonaws.backupgateway#PutHypervisorPropertyMappings" - }, - "read": { - "target": "com.amazonaws.backupgateway#GetHypervisorPropertyMappings" - } - }, "com.amazonaws.backupgateway#HypervisorResource": { "type": "resource", "identifiers": { @@ -2546,9 +2272,6 @@ "create": { "target": "com.amazonaws.backupgateway#ImportHypervisorConfiguration" }, - "read": { - "target": "com.amazonaws.backupgateway#GetHypervisor" - }, "update": { "target": "com.amazonaws.backupgateway#UpdateHypervisor" }, @@ -2558,16 +2281,6 @@ "list": { "target": "com.amazonaws.backupgateway#ListHypervisors" }, - "operations": [ - { - "target": "com.amazonaws.backupgateway#StartVirtualMachinesMetadataSync" - } - ], - "resources": [ - { - "target": "com.amazonaws.backupgateway#HypervisorPropertyMappingResource" - } - ], "traits": { "aws.cloudformation#cfnResource": { "name": "Hypervisor" @@ -2603,16 +2316,6 @@ "target": "com.amazonaws.backupgateway#Hypervisor" } }, - "com.amazonaws.backupgateway#IamRoleArn": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 20, - "max": 2048 - }, - "smithy.api#pattern": "^arn:(aws|aws-cn|aws-us-gov):iam::([0-9]+):role/(\\S+)$" - } - }, "com.amazonaws.backupgateway#ImportHypervisorConfiguration": { "type": "operation", "input": { @@ -2626,7 +2329,10 @@ "target": "com.amazonaws.backupgateway#AccessDeniedException" }, { - "target": "com.amazonaws.backupgateway#ConflictException" + "target": "com.amazonaws.backupgateway#InternalServerException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" } ], "traits": { @@ -2724,6 +2430,14 @@ "output": { "target": "com.amazonaws.backupgateway#ListGatewaysOutput" }, + "errors": [ + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" + } + ], "traits": { "smithy.api#documentation": "Lists backup gateways owned by an Amazon Web Services account in an Amazon Web Services Region. The returned list is ordered by gateway Amazon Resource Name (ARN).
", "smithy.api#paginated": { @@ -2774,6 +2488,14 @@ "output": { "target": "com.amazonaws.backupgateway#ListHypervisorsOutput" }, + "errors": [ + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" + } + ], "traits": { "smithy.api#documentation": "Lists your hypervisors.
", "smithy.api#paginated": { @@ -2825,8 +2547,14 @@ "target": "com.amazonaws.backupgateway#ListTagsForResourceOutput" }, "errors": [ + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, { "target": "com.amazonaws.backupgateway#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" } ], "traits": { @@ -2870,6 +2598,14 @@ "output": { "target": "com.amazonaws.backupgateway#ListVirtualMachinesOutput" }, + "errors": [ + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" + } + ], "traits": { "smithy.api#documentation": "Lists your virtual machines.
", "smithy.api#paginated": { @@ -2918,16 +2654,6 @@ } } }, - "com.amazonaws.backupgateway#LogGroupArn": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 0, - "max": 2048 - }, - "smithy.api#pattern": "^$|^arn:(aws|aws-cn|aws-us-gov):logs:([a-zA-Z0-9-]+):([0-9]+):log-group:[a-zA-Z0-9_\\-\\/\\.]+:\\*$" - } - }, "com.amazonaws.backupgateway#MaintenanceStartTime": { "type": "structure", "members": { @@ -3020,115 +2746,6 @@ "smithy.api#pattern": "^[^\\x00]+$" } }, - "com.amazonaws.backupgateway#PutBandwidthRateLimitSchedule": { - "type": "operation", - "input": { - "target": "com.amazonaws.backupgateway#PutBandwidthRateLimitScheduleInput" - }, - "output": { - "target": "com.amazonaws.backupgateway#PutBandwidthRateLimitScheduleOutput" - }, - "errors": [ - { - "target": "com.amazonaws.backupgateway#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "This action sets the bandwidth rate limit schedule for a specified gateway. \n By default, gateways do not have a bandwidth rate limit schedule, which means \n no bandwidth rate limiting is in effect. Use this to initiate a \n gateway's bandwidth rate limit schedule.
", - "smithy.api#idempotent": {} - } - }, - "com.amazonaws.backupgateway#PutBandwidthRateLimitScheduleInput": { - "type": "structure", - "members": { - "GatewayArn": { - "target": "com.amazonaws.backupgateway#GatewayArn", - "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the gateway. Use the \n \n ListGateways
\n operation to return a list of gateways \n for your account and Amazon Web Services Region.
An array containing bandwidth rate limit schedule intervals for a gateway. \n When no bandwidth rate limit intervals have been scheduled, the array is empty.
", - "smithy.api#required": {} - } - } - } - }, - "com.amazonaws.backupgateway#PutBandwidthRateLimitScheduleOutput": { - "type": "structure", - "members": { - "GatewayArn": { - "target": "com.amazonaws.backupgateway#GatewayArn", - "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the gateway. Use the \n \n ListGateways
\n operation to return a list of gateways \n for your account and Amazon Web Services Region.
This action sets the property mappings for the specified hypervisor. \n A hypervisor property mapping displays the relationship of entity properties \n available from the on-premises hypervisor to the properties available in Amazon Web Services.
", - "smithy.api#idempotent": {} - } - }, - "com.amazonaws.backupgateway#PutHypervisorPropertyMappingsInput": { - "type": "structure", - "members": { - "HypervisorArn": { - "target": "com.amazonaws.backupgateway#ServerArn", - "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the hypervisor.
", - "smithy.api#required": {} - } - }, - "VmwareToAwsTagMappings": { - "target": "com.amazonaws.backupgateway#VmwareToAwsTagMappings", - "traits": { - "smithy.api#documentation": "This action requests the mappings of on-premises VMware tags to the \n Amazon Web Services tags.
", - "smithy.api#required": {} - } - }, - "IamRoleArn": { - "target": "com.amazonaws.backupgateway#IamRoleArn", - "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role.
", - "smithy.api#required": {} - } - } - } - }, - "com.amazonaws.backupgateway#PutHypervisorPropertyMappingsOutput": { - "type": "structure", - "members": { - "HypervisorArn": { - "target": "com.amazonaws.backupgateway#ServerArn", - "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the hypervisor.
" - } - } - } - }, "com.amazonaws.backupgateway#PutMaintenanceStartTime": { "type": "operation", "input": { @@ -3141,8 +2758,14 @@ { "target": "com.amazonaws.backupgateway#ConflictException" }, + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, { "target": "com.amazonaws.backupgateway#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" } ], "traits": { @@ -3237,76 +2860,6 @@ "smithy.api#pattern": "^arn:(aws|aws-cn|aws-us-gov):backup-gateway(:[a-zA-Z-0-9]+){3}\\/[a-zA-Z-0-9]+$" } }, - "com.amazonaws.backupgateway#StartVirtualMachinesMetadataSync": { - "type": "operation", - "input": { - "target": "com.amazonaws.backupgateway#StartVirtualMachinesMetadataSyncInput" - }, - "output": { - "target": "com.amazonaws.backupgateway#StartVirtualMachinesMetadataSyncOutput" - }, - "errors": [ - { - "target": "com.amazonaws.backupgateway#AccessDeniedException" - }, - { - "target": "com.amazonaws.backupgateway#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "This action sends a request to sync metadata across the specified virtual machines.
" - } - }, - "com.amazonaws.backupgateway#StartVirtualMachinesMetadataSyncInput": { - "type": "structure", - "members": { - "HypervisorArn": { - "target": "com.amazonaws.backupgateway#ServerArn", - "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the hypervisor.
", - "smithy.api#required": {} - } - } - } - }, - "com.amazonaws.backupgateway#StartVirtualMachinesMetadataSyncOutput": { - "type": "structure", - "members": { - "HypervisorArn": { - "target": "com.amazonaws.backupgateway#ServerArn", - "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the hypervisor.
" - } - } - } - }, - "com.amazonaws.backupgateway#SyncMetadataStatus": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "CREATED", - "name": "CREATED" - }, - { - "value": "RUNNING", - "name": "RUNNING" - }, - { - "value": "FAILED", - "name": "FAILED" - }, - { - "value": "PARTIALLY_FAILED", - "name": "PARTIALLY_FAILED" - }, - { - "value": "SUCCEEDED", - "name": "SUCCEEDED" - } - ] - } - }, "com.amazonaws.backupgateway#Tag": { "type": "structure", "members": { @@ -3354,8 +2907,14 @@ "target": "com.amazonaws.backupgateway#TagResourceOutput" }, "errors": [ + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, { "target": "com.amazonaws.backupgateway#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" } ], "traits": { @@ -3420,8 +2979,14 @@ { "target": "com.amazonaws.backupgateway#ConflictException" }, + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, { "target": "com.amazonaws.backupgateway#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" } ], "traits": { @@ -3495,8 +3060,14 @@ "target": "com.amazonaws.backupgateway#UntagResourceOutput" }, "errors": [ + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, { "target": "com.amazonaws.backupgateway#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" } ], "traits": { @@ -3545,8 +3116,14 @@ { "target": "com.amazonaws.backupgateway#ConflictException" }, + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, { "target": "com.amazonaws.backupgateway#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" } ], "traits": { @@ -3591,8 +3168,14 @@ "target": "com.amazonaws.backupgateway#UpdateGatewaySoftwareNowOutput" }, "errors": [ + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, { "target": "com.amazonaws.backupgateway#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" } ], "traits": { @@ -3635,10 +3218,13 @@ "target": "com.amazonaws.backupgateway#AccessDeniedException" }, { - "target": "com.amazonaws.backupgateway#ConflictException" + "target": "com.amazonaws.backupgateway#InternalServerException" }, { "target": "com.amazonaws.backupgateway#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" } ], "traits": { @@ -3678,12 +3264,6 @@ "traits": { "smithy.api#documentation": "The updated name for the hypervisor
" } - }, - "LogGroupArn": { - "target": "com.amazonaws.backupgateway#LogGroupArn", - "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the group of gateways within the requested log.
" - } } } }, @@ -3810,12 +3390,6 @@ "traits": { "smithy.api#documentation": "The most recent date a virtual machine was backed up, in Unix format and UTC time.
" } - }, - "VmwareTags": { - "target": "com.amazonaws.backupgateway#VmwareTags", - "traits": { - "smithy.api#documentation": "These are the details of the VMware tags associated with the specified \n virtual machine.
" - } } }, "traits": { @@ -3847,98 +3421,6 @@ "target": "com.amazonaws.backupgateway#VirtualMachine" } }, - "com.amazonaws.backupgateway#VmwareCategory": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 80 - } - } - }, - "com.amazonaws.backupgateway#VmwareTag": { - "type": "structure", - "members": { - "VmwareCategory": { - "target": "com.amazonaws.backupgateway#VmwareCategory", - "traits": { - "smithy.api#documentation": "The is the category of VMware.
" - } - }, - "VmwareTagName": { - "target": "com.amazonaws.backupgateway#VmwareTagName", - "traits": { - "smithy.api#documentation": "This is the user-defined name of a VMware tag.
" - } - }, - "VmwareTagDescription": { - "target": "com.amazonaws.backupgateway#string", - "traits": { - "smithy.api#documentation": "This is a user-defined description of a VMware tag.
" - } - } - }, - "traits": { - "smithy.api#documentation": "A VMware tag is a tag attached to a specific virtual machine.\n A tag \n is a key-value pair you can use to manage, filter, and search for your resources.
\nThe content of VMware tags can be matched to Amazon Web Services tags.
" - } - }, - "com.amazonaws.backupgateway#VmwareTagName": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 80 - } - } - }, - "com.amazonaws.backupgateway#VmwareTags": { - "type": "list", - "member": { - "target": "com.amazonaws.backupgateway#VmwareTag" - } - }, - "com.amazonaws.backupgateway#VmwareToAwsTagMapping": { - "type": "structure", - "members": { - "VmwareCategory": { - "target": "com.amazonaws.backupgateway#VmwareCategory", - "traits": { - "smithy.api#documentation": "The is the category of VMware.
", - "smithy.api#required": {} - } - }, - "VmwareTagName": { - "target": "com.amazonaws.backupgateway#VmwareTagName", - "traits": { - "smithy.api#documentation": "This is the user-defined name of a VMware tag.
", - "smithy.api#required": {} - } - }, - "AwsTagKey": { - "target": "com.amazonaws.backupgateway#TagKey", - "traits": { - "smithy.api#documentation": "The key part of the Amazon Web Services tag's key-value pair.
", - "smithy.api#required": {} - } - }, - "AwsTagValue": { - "target": "com.amazonaws.backupgateway#TagValue", - "traits": { - "smithy.api#documentation": "The value part of the Amazon Web Services tag's key-value pair.
", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "This displays the mapping of on-premises VMware tags to the \n corresponding Amazon Web Services tags.
" - } - }, - "com.amazonaws.backupgateway#VmwareToAwsTagMappings": { - "type": "list", - "member": { - "target": "com.amazonaws.backupgateway#VmwareToAwsTagMapping" - } - }, "com.amazonaws.backupgateway#VpcEndpoint": { "type": "string", "traits": { diff --git a/codegen/sdk/aws-models/batch.json b/codegen/sdk/aws-models/batch.json index 899a886289f..77050400161 100644 --- a/codegen/sdk/aws-models/batch.json +++ b/codegen/sdk/aws-models/batch.json @@ -128,7 +128,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -177,6 +177,15 @@ "ref": "Endpoint" } ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" } ], "type": "tree", @@ -489,9 +498,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": true + "Region": "ap-south-1" } }, { @@ -502,9 +511,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": false + "Region": "ap-south-1" } }, { @@ -515,9 +524,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": true + "Region": "ap-south-1" } }, { @@ -528,9 +537,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": false + "Region": "ap-south-1" } }, { @@ -541,9 +550,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": true + "Region": "eu-south-1" } }, { @@ -554,9 +563,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": false + "Region": "eu-south-1" } }, { @@ -567,9 +576,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": true + "Region": "eu-south-1" } }, { @@ -580,9 +589,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": false + "Region": "eu-south-1" } }, { @@ -593,9 +602,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "us-gov-east-1", - "UseDualStack": true + "Region": "us-gov-east-1" } }, { @@ -606,9 +615,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "us-gov-east-1", - "UseDualStack": false + "Region": "us-gov-east-1" } }, { @@ -619,9 +628,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": true + "Region": "us-gov-east-1" } }, { @@ -632,9 +641,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": false + "Region": "us-gov-east-1" } }, { @@ -645,9 +654,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "me-central-1", - "UseDualStack": true + "Region": "me-central-1" } }, { @@ -658,9 +667,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "me-central-1", - "UseDualStack": false + "Region": "me-central-1" } }, { @@ -671,9 +680,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": true + "Region": "me-central-1" } }, { @@ -684,9 +693,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": false + "Region": "me-central-1" } }, { @@ -697,9 +706,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": true + "Region": 
"ca-central-1" } }, { @@ -710,9 +719,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": false + "Region": "ca-central-1" } }, { @@ -723,9 +732,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": true + "Region": "ca-central-1" } }, { @@ -736,9 +745,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": false + "Region": "ca-central-1" } }, { @@ -749,9 +758,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": true + "Region": "eu-central-1" } }, { @@ -762,9 +771,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": false + "Region": "eu-central-1" } }, { @@ -775,9 +784,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": true + "Region": "eu-central-1" } }, { @@ -788,9 +797,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": false + "Region": "eu-central-1" } }, { @@ -801,9 +810,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": true + "Region": "us-west-1" } }, { @@ -814,9 +823,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": false + "Region": "us-west-1" } }, { @@ -827,9 +836,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": true + "Region": "us-west-1" } }, { @@ -840,9 +849,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": false + "Region": "us-west-1" } }, { @@ -853,9 +862,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": true + "Region": "us-west-2" } }, { @@ -866,9 +875,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": false + "Region": "us-west-2" } }, { @@ -879,9 +888,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": true + "Region": "us-west-2" } }, { @@ -892,9 +901,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": false + "Region": "us-west-2" } }, { @@ -905,9 +914,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": true + "Region": "af-south-1" } }, { @@ -918,9 +927,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": false + "Region": "af-south-1" } }, { @@ -931,9 +940,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": true + "Region": "af-south-1" } }, { @@ -944,9 +953,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": false + "Region": "af-south-1" } }, { @@ -957,9 +966,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": true + "Region": "eu-north-1" } }, { @@ -970,9 +979,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": false + "Region": "eu-north-1" } }, { @@ -983,9 +992,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": true + "Region": 
"eu-north-1" } }, { @@ -996,9 +1005,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": false + "Region": "eu-north-1" } }, { @@ -1009,9 +1018,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": true + "Region": "eu-west-3" } }, { @@ -1022,9 +1031,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": false + "Region": "eu-west-3" } }, { @@ -1035,9 +1044,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": true + "Region": "eu-west-3" } }, { @@ -1048,9 +1057,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": false + "Region": "eu-west-3" } }, { @@ -1061,9 +1070,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": true + "Region": "eu-west-2" } }, { @@ -1074,9 +1083,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": false + "Region": "eu-west-2" } }, { @@ -1087,9 +1096,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": true + "Region": "eu-west-2" } }, { @@ -1100,9 +1109,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": false + "Region": "eu-west-2" } }, { @@ -1113,9 +1122,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": true + "Region": "eu-west-1" } }, { @@ -1126,9 +1135,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": false + "Region": "eu-west-1" } }, { @@ -1139,9 +1148,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": true + "Region": "eu-west-1" } }, { @@ -1152,9 +1161,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": false + "Region": "eu-west-1" } }, { @@ -1165,9 +1174,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": true + "Region": "ap-northeast-3" } }, { @@ -1178,9 +1187,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": false + "Region": "ap-northeast-3" } }, { @@ -1191,9 +1200,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": true + "Region": "ap-northeast-3" } }, { @@ -1204,9 +1213,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": false + "Region": "ap-northeast-3" } }, { @@ -1217,9 +1226,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": true + "Region": "ap-northeast-2" } }, { @@ -1230,9 +1239,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": false + "Region": "ap-northeast-2" } }, { @@ -1243,9 +1252,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": true + "Region": "ap-northeast-2" } }, { @@ -1256,9 +1265,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": false + "Region": "ap-northeast-2" } }, { @@ -1269,9 +1278,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - 
"Region": "ap-northeast-1", - "UseDualStack": true + "Region": "ap-northeast-1" } }, { @@ -1282,9 +1291,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": false + "Region": "ap-northeast-1" } }, { @@ -1295,9 +1304,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": true + "Region": "ap-northeast-1" } }, { @@ -1308,9 +1317,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": false + "Region": "ap-northeast-1" } }, { @@ -1321,9 +1330,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": true + "Region": "me-south-1" } }, { @@ -1334,9 +1343,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": false + "Region": "me-south-1" } }, { @@ -1347,9 +1356,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": true + "Region": "me-south-1" } }, { @@ -1360,9 +1369,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": false + "Region": "me-south-1" } }, { @@ -1373,9 +1382,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "sa-east-1", - "UseDualStack": true + "Region": "sa-east-1" } }, { @@ -1386,9 +1395,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "sa-east-1", - "UseDualStack": false + "Region": "sa-east-1" } }, { @@ -1399,9 +1408,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": true + "Region": "sa-east-1" } }, { @@ -1412,9 +1421,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": false + "Region": "sa-east-1" } }, { @@ -1425,9 +1434,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "ap-east-1", - "UseDualStack": true + "Region": "ap-east-1" } }, { @@ -1438,9 +1447,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "ap-east-1", - "UseDualStack": false + "Region": "ap-east-1" } }, { @@ -1451,9 +1460,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "ap-east-1", - "UseDualStack": true + "Region": "ap-east-1" } }, { @@ -1464,9 +1473,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "ap-east-1", - "UseDualStack": false + "Region": "ap-east-1" } }, { @@ -1477,9 +1486,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "cn-north-1", - "UseDualStack": true + "Region": "cn-north-1" } }, { @@ -1490,9 +1499,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "cn-north-1", - "UseDualStack": false + "Region": "cn-north-1" } }, { @@ -1503,9 +1512,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "cn-north-1", - "UseDualStack": true + "Region": "cn-north-1" } }, { @@ -1516,9 +1525,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "cn-north-1", - "UseDualStack": false + "Region": "cn-north-1" } }, { @@ -1529,9 +1538,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "us-gov-west-1", - "UseDualStack": true + "Region": "us-gov-west-1" } }, { @@ -1542,9 +1551,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "us-gov-west-1", - "UseDualStack": false + "Region": "us-gov-west-1" } }, { @@ -1555,9 +1564,9 @@ } }, 
"params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "us-gov-west-1", - "UseDualStack": true + "Region": "us-gov-west-1" } }, { @@ -1568,9 +1577,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "us-gov-west-1", - "UseDualStack": false + "Region": "us-gov-west-1" } }, { @@ -1581,9 +1590,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "ap-southeast-1", - "UseDualStack": true + "Region": "ap-southeast-1" } }, { @@ -1594,9 +1603,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "ap-southeast-1", - "UseDualStack": false + "Region": "ap-southeast-1" } }, { @@ -1607,9 +1616,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "ap-southeast-1", - "UseDualStack": true + "Region": "ap-southeast-1" } }, { @@ -1620,9 +1629,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "ap-southeast-1", - "UseDualStack": false + "Region": "ap-southeast-1" } }, { @@ -1633,9 +1642,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "ap-southeast-2", - "UseDualStack": true + "Region": "ap-southeast-2" } }, { @@ -1646,9 +1655,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "ap-southeast-2", - "UseDualStack": false + "Region": "ap-southeast-2" } }, { @@ -1659,9 +1668,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "ap-southeast-2", - "UseDualStack": true + "Region": "ap-southeast-2" } }, { @@ -1672,9 +1681,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "ap-southeast-2", - "UseDualStack": false + "Region": "ap-southeast-2" } }, { @@ -1685,9 +1694,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "ap-southeast-3", - "UseDualStack": true + "Region": "ap-southeast-3" } }, { @@ -1698,9 +1707,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "ap-southeast-3", - "UseDualStack": false + "Region": "ap-southeast-3" } }, { @@ -1711,9 +1720,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "ap-southeast-3", - "UseDualStack": true + "Region": "ap-southeast-3" } }, { @@ -1724,9 +1733,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "ap-southeast-3", - "UseDualStack": false + "Region": "ap-southeast-3" } }, { @@ -1737,9 +1746,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "us-east-1", - "UseDualStack": true + "Region": "us-east-1" } }, { @@ -1750,9 +1759,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "us-east-1", - "UseDualStack": false + "Region": "us-east-1" } }, { @@ -1763,9 +1772,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "us-east-1", - "UseDualStack": true + "Region": "us-east-1" } }, { @@ -1776,9 +1785,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "us-east-1", - "UseDualStack": false + "Region": "us-east-1" } }, { @@ -1789,9 +1798,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "us-east-2", - "UseDualStack": true + "Region": "us-east-2" } }, { @@ -1802,9 +1811,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "us-east-2", - "UseDualStack": false + "Region": "us-east-2" } }, { @@ -1815,9 +1824,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "us-east-2", - "UseDualStack": true + "Region": "us-east-2" } }, { @@ -1828,9 +1837,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": 
false, - "Region": "us-east-2", - "UseDualStack": false + "Region": "us-east-2" } }, { @@ -1841,9 +1850,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "cn-northwest-1", - "UseDualStack": true + "Region": "cn-northwest-1" } }, { @@ -1854,9 +1863,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "cn-northwest-1", - "UseDualStack": false + "Region": "cn-northwest-1" } }, { @@ -1867,9 +1876,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "cn-northwest-1", - "UseDualStack": true + "Region": "cn-northwest-1" } }, { @@ -1880,9 +1889,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "cn-northwest-1", - "UseDualStack": false + "Region": "cn-northwest-1" } }, { @@ -1893,9 +1902,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, "Region": "us-east-1", - "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1905,9 +1914,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "UseDualStack": false, "UseFIPS": true, "Region": "us-east-1", - "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -1917,9 +1926,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "UseDualStack": true, "UseFIPS": false, "Region": "us-east-1", - "UseDualStack": true, "Endpoint": "https://example.com" } } @@ -2511,7 +2520,7 @@ "subnets": { "target": "com.amazonaws.batch#StringList", "traits": { - "smithy.api#documentation": "The VPC subnets where the compute resources are launched. These subnets must be within the same VPC. Fargate\n compute resources can contain up to 16 subnets. For more information, see VPCs and subnets in the Amazon VPC User\n Guide.
\nBatch on Amazon EC2 and Batch on Amazon EKS support Local Zones. For more information, see\n Local Zones in the Amazon EC2 User Guide for Linux Instances, Amazon EKS and Amazon Web Services Local\n Zones in the Amazon EKS User Guide and \n Amazon ECS clusters in Local Zones, Wavelength Zones, and Amazon Web Services Outposts in the\n Amazon ECS Developer Guide.
\nBatch on Fargate doesn't currently support Local Zones.
\nThe VPC subnets where the compute resources are launched. These subnets must be within the same VPC. Fargate\n compute resources can contain up to 16 subnets. For more information, see VPCs and subnets in the Amazon VPC User\n Guide.
", "smithy.api#required": {} } }, @@ -2548,7 +2557,7 @@ "bidPercentage": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "The maximum percentage that a Spot Instance price can be when compared with the On-Demand price for that\n instance type before instances are launched. For example, if your maximum percentage is 20%, then the Spot price must\n be less than 20% of the current On-Demand price for that Amazon EC2 instance. You always pay the lowest (market) price and\n never more than your maximum percentage. If you leave this field empty, the default value is 100% of the On-Demand\n price. For most use cases, we recommend\n leaving this field empty.
\nThis parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.
\nThe maximum percentage that a Spot Instance price can be when compared with the On-Demand price for that\n instance type before instances are launched. For example, if your maximum percentage is 20%, then the Spot price must\n be less than 20% of the current On-Demand price for that Amazon EC2 instance. You always pay the lowest (market) price and\n never more than your maximum percentage. If you leave this field empty, the default value is 100% of the On-Demand\n price.
\nThis parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.
\nThe VPC subnets where the compute resources are launched. Fargate compute resources can\n contain up to 16 subnets. For Fargate compute resources, providing an empty list will be\n handled as if this parameter wasn't specified and no change is made. For EC2 compute resources,\n providing an empty list removes the VPC subnets from the compute resource. For more information,\n see VPCs and\n subnets in the Amazon VPC User Guide.
\nWhen updating a compute environment, changing the VPC subnets requires an infrastructure\n update of the compute environment. For more information, see Updating compute environments in the\n Batch User Guide.
\nBatch on Amazon EC2 and Batch on Amazon EKS support Local Zones. For more information, see\n Local Zones in the Amazon EC2 User Guide for Linux Instances, Amazon EKS and Amazon Web Services Local\n Zones in the Amazon EKS User Guide and \n Amazon ECS clusters in Local Zones, Wavelength Zones, and Amazon Web Services Outposts in the\n Amazon ECS Developer Guide.
\nBatch on Fargate doesn't currently support Local Zones.
\nThe VPC subnets where the compute resources are launched. Fargate compute resources can contain up to 16\n subnets. For Fargate compute resources, providing an empty list will be handled as if this parameter wasn't\n specified and no change is made. For EC2 compute resources, providing an empty list removes the VPC subnets from the\n compute resource. For more information, see VPCs and subnets in the Amazon VPC User Guide.
\nWhen updating a compute environment, changing the VPC subnets requires an infrastructure update of the compute\n environment. For more information, see Updating compute environments in the Batch User Guide.
" } }, "securityGroupIds": { @@ -2646,7 +2655,7 @@ "bidPercentage": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "The maximum percentage that a Spot Instance price can be when compared with the On-Demand price for that\n instance type before instances are launched. For example, if your maximum percentage is 20%, the Spot price must be\n less than 20% of the current On-Demand price for that Amazon EC2 instance. You always pay the lowest (market) price and\n never more than your maximum percentage. For\n most use cases, we recommend leaving this field empty.
\nWhen updating a compute environment, changing the bid percentage requires an infrastructure update of the\n compute environment. For more information, see Updating compute environments in the\n Batch User Guide.
\nThis parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.
\nThe maximum percentage that a Spot Instance price can be when compared with the On-Demand price for that\n instance type before instances are launched. For example, if your maximum percentage is 20%, the Spot price must be\n less than 20% of the current On-Demand price for that Amazon EC2 instance. You always pay the lowest (market) price and\n never more than your maximum percentage.
\nWhen updating a compute environment, changing the bid percentage requires an infrastructure update of the\n compute environment. For more information, see Updating compute environments in the\n Batch User Guide.
\nThis parameter isn't applicable to jobs that are running on Fargate resources. Don't specify it.
\nA list of job attempts that are associated with this job.
" } - }, - "isCancelled": { - "target": "com.amazonaws.batch#Boolean", - "traits": { - "smithy.api#documentation": "Indicates whether the job is canceled.
" - } - }, - "isTerminated": { - "target": "com.amazonaws.batch#Boolean", - "traits": { - "smithy.api#documentation": "Indicates whether the job is terminated.
" - } } }, "traits": { @@ -5425,7 +5422,7 @@ "attemptDurationSeconds": { "target": "com.amazonaws.batch#Integer", "traits": { - "smithy.api#documentation": "The job timeout time (in seconds) that's measured from the job attempt's startedAt
timestamp. After\n this time passes, Batch terminates your jobs if they aren't finished. The minimum value for the timeout is 60\n seconds.
For array jobs, the timeout applies to the child jobs, not to the parent array job.
\nFor multi-node parallel (MNP) jobs, the timeout applies to the whole job, not to the individual nodes.
" + "smithy.api#documentation": "The job timeout time (in seconds) that's measured from the job attempt's startedAt
timestamp. After\n this time passes, Batch terminates your jobs if they aren't finished. The minimum value for the timeout is 60\n seconds.
A domain name associated with a distribution.
" + "smithy.api#documentation": "A domain name associated with a distribution.
" } }, "ICPRecordalStatus": { "target": "com.amazonaws.cloudfront#ICPRecordalStatus", "traits": { - "smithy.api#documentation": "The Internet Content Provider (ICP) recordal status for a CNAME. The ICPRecordalStatus\n\t\t\tis set to APPROVED for all CNAMEs (aliases) in regions outside of China.
\nThe status values returned are the following:
\n\n APPROVED indicates that the associated CNAME\n\t\t\t\t\thas a valid ICP recordal number. Multiple CNAMEs can be associated with a\n\t\t\t\t\tdistribution, and CNAMEs can correspond to different ICP recordals. To be marked\n\t\t\t\t\tas APPROVED, that is, valid to use with China region, a CNAME must have one ICP\n\t\t\t\t\trecordal number associated with it.
\n\n SUSPENDED indicates that the associated CNAME\n\t\t\t\t\tdoes not have a valid ICP recordal number.
\n\n PENDING indicates that CloudFront can't determine\n\t\t\t\t\tthe ICP recordal status of the CNAME associated with the distribution because\n\t\t\t\t\tthere was an error in trying to determine the status. You can try again to see\n\t\t\t\t\tif the error is resolved in which case CloudFront returns an APPROVED or SUSPENDED\n\t\t\t\t\tstatus.
\nThe Internet Content Provider (ICP) recordal status for a CNAME. The ICPRecordalStatus\n\t\t\tis set to APPROVED for all CNAMEs (aliases) in regions outside of China.
\n\t\tThe status values returned are the following:
\n\t\t\n APPROVED indicates that the associated CNAME\n\t\t\t\t\thas a valid ICP recordal number. Multiple CNAMEs can be associated with a\n\t\t\t\t\tdistribution, and CNAMEs can correspond to different ICP recordals. To be marked\n\t\t\t\t\tas APPROVED, that is, valid to use with China region, a CNAME must have one ICP\n\t\t\t\t\trecordal number associated with it.
\n\t\t\t\n SUSPENDED indicates that the associated CNAME\n\t\t\t\t\tdoes not have a valid ICP recordal number.
\n\t\t\t\n PENDING indicates that CloudFront can't determine\n\t\t\t\t\tthe ICP recordal status of the CNAME associated with the distribution because\n\t\t\t\t\tthere was an error in trying to determine the status. You can try again to see\n\t\t\t\t\tif the error is resolved in which case CloudFront returns an APPROVED or SUSPENDED\n\t\t\t\t\tstatus.
\n\t\t\tAmazon Web Services services in China customers must file for an Internet Content Provider (ICP)\n\t\t\trecordal if they want to serve content publicly on an alternate domain name, also known\n\t\t\tas a CNAME, that they've added to CloudFront. AliasICPRecordal provides the ICP recordal\n\t\t\tstatus for CNAMEs associated with distributions. The status is returned in the CloudFront\n\t\t\tresponse; you can't configure it yourself.
\nFor more information about ICP recordals, see Signup, Accounts, and Credentials in Getting Started with Amazon Web Services\n\t\t\t\tservices in China.
" + "smithy.api#documentation": "Amazon Web Services services in China customers must file for an Internet Content Provider (ICP)\n\t\t\trecordal if they want to serve content publicly on an alternate domain name, also known\n\t\t\tas a CNAME, that they've added to CloudFront. AliasICPRecordal provides the ICP recordal\n\t\t\tstatus for CNAMEs associated with distributions. The status is returned in the CloudFront\n\t\t\tresponse; you can't configure it yourself.
\n\t\tFor more information about ICP recordals, see Signup, Accounts, and Credentials in Getting Started with Amazon Web Services\n\t\t\t\tservices in China.
" } }, "com.amazonaws.cloudfront#AliasICPRecordals": { @@ -190,7 +190,7 @@ } }, "traits": { - "smithy.api#documentation": "A complex type that contains information about CNAMEs (alternate domain names), if\n\t\t\tany, for this distribution.
" + "smithy.api#documentation": "A complex type that contains information about CNAMEs (alternate domain names), if\n\t\t\tany, for this distribution.
" } }, "com.amazonaws.cloudfront#AllowedMethods": { @@ -215,7 +215,7 @@ } }, "traits": { - "smithy.api#documentation": "A complex type that controls which HTTP methods CloudFront processes and forwards to your\n\t\t\tAmazon S3 bucket or your custom origin. There are three choices:
\nCloudFront forwards only GET
and HEAD
requests.
CloudFront forwards only GET
, HEAD
, and\n\t\t\t\t\t\tOPTIONS
requests.
CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST
, and\n\t\t\t\t\t\tDELETE
requests.
If you pick the third choice, you may need to restrict access to your Amazon S3 bucket or\n\t\t\tto your custom origin so users can't perform operations that you don't want them to. For\n\t\t\texample, you might not want users to have permissions to delete objects from your\n\t\t\torigin.
" + "smithy.api#documentation": "A complex type that controls which HTTP methods CloudFront processes and forwards to your\n\t\t\tAmazon S3 bucket or your custom origin. There are three choices:
\n\t\tCloudFront forwards only GET
and HEAD
requests.
CloudFront forwards only GET
, HEAD
, and\n\t\t\t\t\t\tOPTIONS
requests.
CloudFront forwards GET, HEAD, OPTIONS, PUT, PATCH, POST
, and\n\t\t\t\t\t\tDELETE
requests.
If you pick the third choice, you may need to restrict access to your Amazon S3 bucket or\n\t\t\tto your custom origin so users can't perform operations that you don't want them to. For\n\t\t\texample, you might not want users to have permissions to delete objects from your\n\t\t\torigin.
" } }, "com.amazonaws.cloudfront#AssociateAlias": { @@ -244,7 +244,7 @@ } ], "traits": { - "smithy.api#documentation": "Associates an alias (also known as a CNAME or an alternate domain name) with a CloudFront\n\t\t\tdistribution.
\nWith this operation you can move an alias that's already in use on a CloudFront distribution\n\t\t\tto a different distribution in one step. This prevents the downtime that could occur if\n\t\t\tyou first remove the alias from one distribution and then separately add the alias to\n\t\t\tanother distribution.
\nTo use this operation to associate an alias with a distribution, you provide the alias\n\t\t\tand the ID of the target distribution for the alias. For more information, including how\n\t\t\tto set up the target distribution, prerequisites that you must complete, and other\n\t\t\trestrictions, see Moving an alternate domain name to a different\n\t\t\t\tdistribution in the Amazon CloudFront Developer Guide.
", + "smithy.api#documentation": "Associates an alias (also known as a CNAME or an alternate domain name) with a CloudFront\n\t\t\tdistribution.
\n\t\tWith this operation you can move an alias that’s already in use on a CloudFront distribution\n\t\t\tto a different distribution in one step. This prevents the downtime that could occur if\n\t\t\tyou first remove the alias from one distribution and then separately add the alias to\n\t\t\tanother distribution.
\n\t\tTo use this operation to associate an alias with a distribution, you provide the alias\n\t\t\tand the ID of the target distribution for the alias. For more information, including how\n\t\t\tto set up the target distribution, prerequisites that you must complete, and other\n\t\t\trestrictions, see Moving an alternate domain name to a different\n\t\t\t\tdistribution in the Amazon CloudFront Developer Guide.
", "smithy.api#http": { "method": "PUT", "uri": "/2020-05-31/distribution/{TargetDistributionId}/associate-alias", @@ -258,7 +258,7 @@ "TargetDistributionId": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The ID of the distribution that you're associating the alias with.
", + "smithy.api#documentation": "The ID of the distribution that you’re associating the alias with.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -314,7 +314,7 @@ "PathPattern": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The pattern (for example, images/*.jpg
) that specifies which requests to\n\t\t\tapply the behavior to. When CloudFront receives a viewer request, the requested path is\n\t\t\tcompared with path patterns in the order in which cache behaviors are listed in the\n\t\t\tdistribution.
You can optionally include a slash (/
) at the beginning of the path\n\t\t\t\tpattern. For example, /images/*.jpg
. CloudFront behavior is the same with or\n\t\t\t\twithout the leading /
.
The path pattern for the default cache behavior is *
and cannot be\n\t\t\tchanged. If the request for an object does not match the path pattern for any cache\n\t\t\tbehaviors, CloudFront applies the behavior in the default cache behavior.
For more information, see Path Pattern in the Amazon CloudFront Developer Guide.
", + "smithy.api#documentation": "The pattern (for example, images/*.jpg
) that specifies which requests to\n\t\t\tapply the behavior to. When CloudFront receives a viewer request, the requested path is\n\t\t\tcompared with path patterns in the order in which cache behaviors are listed in the\n\t\t\tdistribution.
You can optionally include a slash (/
) at the beginning of the path\n\t\t\t\tpattern. For example, /images/*.jpg
. CloudFront behavior is the same with or\n\t\t\t\twithout the leading /
.
The path pattern for the default cache behavior is *
and cannot be\n\t\t\tchanged. If the request for an object does not match the path pattern for any cache\n\t\t\tbehaviors, CloudFront applies the behavior in the default cache behavior.
For more information, see Path Pattern in the Amazon CloudFront Developer Guide.
", "smithy.api#required": {} } }, @@ -328,19 +328,19 @@ "TrustedSigners": { "target": "com.amazonaws.cloudfront#TrustedSigners", "traits": { - "smithy.api#documentation": "We recommend using TrustedKeyGroups
instead of\n\t\t\t\t\tTrustedSigners
.
A list of Amazon Web Services account IDs whose public keys CloudFront can use to validate signed URLs or\n\t\t\tsigned cookies.
\nWhen a cache behavior contains trusted signers, CloudFront requires signed URLs or signed\n\t\t\tcookies for all requests that match the cache behavior. The URLs or cookies must be\n\t\t\tsigned with the private key of a CloudFront key pair in the trusted signer's Amazon Web Services account.\n\t\t\tThe signed URL or cookie contains information about which public key CloudFront should use to\n\t\t\tverify the signature. For more information, see Serving private content in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" + "smithy.api#documentation": "We recommend using TrustedKeyGroups
instead of\n\t\t\t\t\tTrustedSigners
.
A list of Amazon Web Services account IDs whose public keys CloudFront can use to validate signed URLs or\n\t\t\tsigned cookies.
\n\t\tWhen a cache behavior contains trusted signers, CloudFront requires signed URLs or signed\n\t\t\tcookies for all requests that match the cache behavior. The URLs or cookies must be\n\t\t\tsigned with the private key of a CloudFront key pair in the trusted signer’s Amazon Web Services account.\n\t\t\tThe signed URL or cookie contains information about which public key CloudFront should use to\n\t\t\tverify the signature. For more information, see Serving private content in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" } }, "TrustedKeyGroups": { "target": "com.amazonaws.cloudfront#TrustedKeyGroups", "traits": { - "smithy.api#documentation": "A list of key groups that CloudFront can use to validate signed URLs or signed\n\t\t\tcookies.
\nWhen a cache behavior contains trusted key groups, CloudFront requires signed URLs or signed\n\t\t\tcookies for all requests that match the cache behavior. The URLs or cookies must be\n\t\t\tsigned with a private key whose corresponding public key is in the key group. The signed\n\t\t\tURL or cookie contains information about which public key CloudFront should use to verify the\n\t\t\tsignature. For more information, see Serving private content in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" + "smithy.api#documentation": "A list of key groups that CloudFront can use to validate signed URLs or signed\n\t\t\tcookies.
\n\t\tWhen a cache behavior contains trusted key groups, CloudFront requires signed URLs or signed\n\t\t\tcookies for all requests that match the cache behavior. The URLs or cookies must be\n\t\t\tsigned with a private key whose corresponding public key is in the key group. The signed\n\t\t\tURL or cookie contains information about which public key CloudFront should use to verify the\n\t\t\tsignature. For more information, see Serving private content in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" } }, "ViewerProtocolPolicy": { "target": "com.amazonaws.cloudfront#ViewerProtocolPolicy", "traits": { - "smithy.api#documentation": "The protocol that viewers can use to access the files in the origin specified by\n\t\t\t\tTargetOriginId
when a request matches the path pattern in\n\t\t\t\tPathPattern
. You can specify the following options:
\n allow-all
: Viewers can use HTTP or HTTPS.
\n redirect-to-https
: If a viewer submits an HTTP request, CloudFront\n\t\t\t\t\treturns an HTTP status code of 301 (Moved Permanently) to the viewer along with\n\t\t\t\t\tthe HTTPS URL. The viewer then resubmits the request using the new URL.
\n https-only
: If a viewer sends an HTTP request, CloudFront returns an\n\t\t\t\t\tHTTP status code of 403 (Forbidden).
For more information about requiring the HTTPS protocol, see Requiring HTTPS Between Viewers and CloudFront in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\nThe only way to guarantee that viewers retrieve an object that was fetched from\n\t\t\t\tthe origin using HTTPS is never to use any other protocol to fetch the object. If\n\t\t\t\tyou have recently changed from HTTP to HTTPS, we recommend that you clear your\n\t\t\t\tobjects' cache because cached objects are protocol agnostic. That means that an edge\n\t\t\t\tlocation will return an object from the cache regardless of whether the current\n\t\t\t\trequest protocol matches the protocol used previously. For more information, see\n\t\t\t\t\tManaging Cache\n\t\t\t\t\tExpiration in the Amazon CloudFront Developer Guide.
\nThe protocol that viewers can use to access the files in the origin specified by\n\t\t\t\tTargetOriginId
when a request matches the path pattern in\n\t\t\t\tPathPattern
. You can specify the following options:
\n allow-all
: Viewers can use HTTP or HTTPS.
\n redirect-to-https
: If a viewer submits an HTTP request, CloudFront\n\t\t\t\t\treturns an HTTP status code of 301 (Moved Permanently) to the viewer along with\n\t\t\t\t\tthe HTTPS URL. The viewer then resubmits the request using the new URL.
\n https-only
: If a viewer sends an HTTP request, CloudFront returns an\n\t\t\t\t\tHTTP status code of 403 (Forbidden).
For more information about requiring the HTTPS protocol, see Requiring HTTPS Between Viewers and CloudFront in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\n\t\tThe only way to guarantee that viewers retrieve an object that was fetched from\n\t\t\t\tthe origin using HTTPS is never to use any other protocol to fetch the object. If\n\t\t\t\tyou have recently changed from HTTP to HTTPS, we recommend that you clear your\n\t\t\t\tobjects’ cache because cached objects are protocol agnostic. That means that an edge\n\t\t\t\tlocation will return an object from the cache regardless of whether the current\n\t\t\t\trequest protocol matches the protocol used previously. For more information, see\n\t\t\t\t\tManaging Cache\n\t\t\t\t\tExpiration in the Amazon CloudFront Developer Guide.
\n\t\tIndicates whether you want to distribute media files in the Microsoft Smooth Streaming\n\t\t\tformat using the origin that is associated with this cache behavior. If so, specify\n\t\t\t\ttrue
; if not, specify false
. If you specify\n\t\t\t\ttrue
for SmoothStreaming
, you can still distribute other\n\t\t\tcontent using this cache behavior if the content matches the value of\n\t\t\t\tPathPattern
.
Indicates whether you want to distribute media files in the Microsoft Smooth Streaming\n\t\t\tformat using the origin that is associated with this cache behavior. If so, specify\n\t\t\t\ttrue
; if not, specify false
. If you specify\n\t\t\t\ttrue
for SmoothStreaming
, you can still distribute other\n\t\t\tcontent using this cache behavior if the content matches the value of\n\t\t\t\tPathPattern
.
The unique identifier of the cache policy that is attached to this cache behavior. For\n\t\t\tmore information, see Creating cache policies or Using the managed cache policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\nA CacheBehavior
must include either a CachePolicyId
or\n\t\t\t\tForwardedValues
. We recommend that you use a\n\t\t\tCachePolicyId
.
The unique identifier of the cache policy that is attached to this cache behavior. For\n\t\t\tmore information, see Creating cache policies or Using the managed cache policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\n\t\tA CacheBehavior
must include either a CachePolicyId
or\n\t\t\t\tForwardedValues
. We recommend that you use a\n\t\t\tCachePolicyId
.
This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field. For more information, see Working with policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\nIf you want to include values in the cache key, use a cache policy. For more\n\t\t\tinformation, see Creating cache policies or Using the managed cache policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\nIf you want to send values to the origin but not include them in the cache key, use an\n\t\t\torigin request policy. For more information, see Creating origin request policies or Using the managed origin request policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\nA CacheBehavior
must include either a CachePolicyId
or\n\t\t\t\tForwardedValues
. We recommend that you use a\n\t\t\tCachePolicyId
.
A complex type that specifies how CloudFront handles query strings, cookies, and HTTP\n\t\t\theaders.
" + "smithy.api#documentation": "This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field. For more information, see Working with policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\n\t\tIf you want to include values in the cache key, use a cache policy. For more\n\t\t\tinformation, see Creating cache policies or Using the managed cache policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\n\t\tIf you want to send values to the origin but not include them in the cache key, use an\n\t\t\torigin request policy. For more information, see Creating origin request policies or Using the managed origin request policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\n\t\tA CacheBehavior
must include either a CachePolicyId
or\n\t\t\t\tForwardedValues
. We recommend that you use a\n\t\t\tCachePolicyId
.
A complex type that specifies how CloudFront handles query strings, cookies, and HTTP\n\t\t\theaders.
" } }, "MinTTL": { "target": "com.amazonaws.cloudfront#long", "traits": { "smithy.api#deprecated": {}, - "smithy.api#documentation": "This field is deprecated. We recommend that you use the MinTTL
field in a\n\t\t\tcache policy instead of this field. For more information, see Creating cache policies or Using the managed cache policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront\n\t\t\tforwards another request to your origin to determine whether the object has been\n\t\t\tupdated. For more information, see Managing How Long\n\t\t\t\tContent Stays in an Edge Cache (Expiration) in the \n\t\t\t\tAmazon CloudFront Developer Guide.
\nYou must specify 0
for MinTTL
if you configure CloudFront to\n\t\t\tforward all headers to your origin (under Headers
, if you specify\n\t\t\t\t1
for Quantity
and *
for\n\t\t\tName
).
This field is deprecated. We recommend that you use the MinTTL
field in a\n\t\t\tcache policy instead of this field. For more information, see Creating cache policies or Using the managed cache policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront\n\t\t\tforwards another request to your origin to determine whether the object has been\n\t\t\tupdated. For more information, see Managing How Long\n\t\t\t\tContent Stays in an Edge Cache (Expiration) in the \n\t\t\t\tAmazon CloudFront Developer Guide.
\n\t\tYou must specify 0
for MinTTL
if you configure CloudFront to\n\t\t\tforward all headers to your origin (under Headers
, if you specify\n\t\t\t\t1
for Quantity
and *
for\n\t\t\tName
).
This field is deprecated. We recommend that you use the DefaultTTL
field\n\t\t\tin a cache policy instead of this field. For more information, see Creating cache policies or Using the managed cache policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
The default amount of time that you want objects to stay in CloudFront caches before CloudFront\n\t\t\tforwards another request to your origin to determine whether the object has been\n\t\t\tupdated. The value that you specify applies only when your origin does not add HTTP\n\t\t\theaders such as Cache-Control max-age
, Cache-Control s-maxage
,\n\t\t\tand Expires
to objects. For more information, see Managing How Long Content Stays in an Edge Cache (Expiration) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
This field is deprecated. We recommend that you use the DefaultTTL
field\n\t\t\tin a cache policy instead of this field. For more information, see Creating cache policies or Using the managed cache policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
The default amount of time that you want objects to stay in CloudFront caches before CloudFront\n\t\t\tforwards another request to your origin to determine whether the object has been\n\t\t\tupdated. The value that you specify applies only when your origin does not add HTTP\n\t\t\theaders such as Cache-Control max-age
, Cache-Control s-maxage
,\n\t\t\tand Expires
to objects. For more information, see Managing How Long Content Stays in an Edge Cache (Expiration) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
This field is deprecated. We recommend that you use the MaxTTL
field in a\n\t\t\tcache policy instead of this field. For more information, see Creating cache policies or Using the managed cache policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
The maximum amount of time that you want objects to stay in CloudFront caches before CloudFront\n\t\t\tforwards another request to your origin to determine whether the object has been\n\t\t\tupdated. The value that you specify applies only when your origin adds HTTP headers such\n\t\t\tas Cache-Control max-age
, Cache-Control s-maxage
, and\n\t\t\t\tExpires
to objects. For more information, see Managing How Long Content Stays in an Edge Cache (Expiration) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
This field is deprecated. We recommend that you use the MaxTTL
field in a\n\t\t\tcache policy instead of this field. For more information, see Creating cache policies or Using the managed cache policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
The maximum amount of time that you want objects to stay in CloudFront caches before CloudFront\n\t\t\tforwards another request to your origin to determine whether the object has been\n\t\t\tupdated. The value that you specify applies only when your origin adds HTTP headers such\n\t\t\tas Cache-Control max-age
, Cache-Control s-maxage
, and\n\t\t\t\tExpires
to objects. For more information, see Managing How Long Content Stays in an Edge Cache (Expiration) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
A complex type that describes how CloudFront processes requests.
\nYou must create at least as many cache behaviors (including the default cache\n\t\t\tbehavior) as you have origins if you want CloudFront to serve objects from all of the origins.\n\t\t\tEach cache behavior specifies the one origin from which you want CloudFront to get objects. If\n\t\t\tyou have two origins and only the default cache behavior, the default cache behavior\n\t\t\twill cause CloudFront to get objects from one of the origins, but the other origin is never\n\t\t\tused.
\nFor the current quota (formerly known as limit) on the number of cache behaviors that\n\t\t\tyou can add to a distribution, see Quotas in the\n\t\t\tAmazon CloudFront Developer Guide.
\nIf you don't want to specify any cache behaviors, include only an empty\n\t\t\t\tCacheBehaviors
element. Don't include an empty\n\t\t\t\tCacheBehavior
element because this is invalid.
To delete all cache behaviors in an existing distribution, update the distribution\n\t\t\tconfiguration and include only an empty CacheBehaviors
element.
To add, change, or remove one or more cache behaviors, update the distribution\n\t\t\tconfiguration and specify all of the cache behaviors that you want to include in the\n\t\t\tupdated distribution.
\nFor more information about cache behaviors, see Cache Behavior Settings in the Amazon CloudFront Developer Guide.
" + "smithy.api#documentation": "A complex type that describes how CloudFront processes requests.
\n\t\tYou must create at least as many cache behaviors (including the default cache\n\t\t\tbehavior) as you have origins if you want CloudFront to serve objects from all of the origins.\n\t\t\tEach cache behavior specifies the one origin from which you want CloudFront to get objects. If\n\t\t\tyou have two origins and only the default cache behavior, the default cache behavior\n\t\t\twill cause CloudFront to get objects from one of the origins, but the other origin is never\n\t\t\tused.
\n\t\tFor the current quota (formerly known as limit) on the number of cache behaviors that\n\t\t\tyou can add to a distribution, see Quotas in the\n\t\t\tAmazon CloudFront Developer Guide.
\n\t\tIf you don’t want to specify any cache behaviors, include only an empty\n\t\t\t\tCacheBehaviors
element. Don’t include an empty\n\t\t\t\tCacheBehavior
element because this is invalid.
To delete all cache behaviors in an existing distribution, update the distribution\n\t\t\tconfiguration and include only an empty CacheBehaviors
element.
To add, change, or remove one or more cache behaviors, update the distribution\n\t\t\tconfiguration and specify all of the cache behaviors that you want to include in the\n\t\t\tupdated distribution.
\n\t\tFor more information about cache behaviors, see Cache Behavior Settings in the Amazon CloudFront Developer Guide.
" } }, "com.amazonaws.cloudfront#CacheBehaviorList": { @@ -449,7 +449,7 @@ "Quantity": { "target": "com.amazonaws.cloudfront#integer", "traits": { - "smithy.api#documentation": "The number of cache behaviors for this distribution.
", + "smithy.api#documentation": "The number of cache behaviors for this distribution.
", "smithy.api#required": {} } }, @@ -461,7 +461,7 @@ } }, "traits": { - "smithy.api#documentation": "A complex type that contains zero or more CacheBehavior
elements.
A complex type that contains zero or more CacheBehavior
elements.
A cache policy.
\nWhen it's attached to a cache behavior, the cache policy determines the\n\t\t\tfollowing:
\nThe values that CloudFront includes in the cache key. These values can include HTTP\n\t\t\t\t\theaders, cookies, and URL query strings. CloudFront uses the cache key to find an\n\t\t\t\t\tobject in its cache that it can return to the viewer.
\nThe default, minimum, and maximum time to live (TTL) values that you want\n\t\t\t\t\tobjects to stay in the CloudFront cache.
\nThe headers, cookies, and query strings that are included in the cache key are\n\t\t\tautomatically included in requests that CloudFront sends to the origin. CloudFront sends a request\n\t\t\twhen it can't find a valid object in its cache that matches the request's cache key. If\n\t\t\tyou want to send values to the origin but not include them in the\n\t\t\tcache key, use OriginRequestPolicy
.
A cache policy.
\n\t\tWhen it’s attached to a cache behavior, the cache policy determines the\n\t\t\tfollowing:
\n\t\tThe values that CloudFront includes in the cache key. These values can include HTTP\n\t\t\t\t\theaders, cookies, and URL query strings. CloudFront uses the cache key to find an\n\t\t\t\t\tobject in its cache that it can return to the viewer.
\n\t\t\tThe default, minimum, and maximum time to live (TTL) values that you want\n\t\t\t\t\tobjects to stay in the CloudFront cache.
\n\t\t\tThe headers, cookies, and query strings that are included in the cache key are\n\t\t\tautomatically included in requests that CloudFront sends to the origin. CloudFront sends a request\n\t\t\twhen it can’t find a valid object in its cache that matches the request’s cache key. If\n\t\t\tyou want to send values to the origin but not include them in the\n\t\t\tcache key, use OriginRequestPolicy
.
The default amount of time, in seconds, that you want objects to stay in the CloudFront\n\t\t\tcache before CloudFront sends another request to the origin to see if the object has been\n\t\t\tupdated. CloudFront uses this value as the object's time to live (TTL) only when the origin\n\t\t\tdoes not send Cache-Control
or Expires
\n\t\t\theaders with the object. For more information, see Managing How Long Content Stays in an Edge Cache (Expiration) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
The default value for this field is 86400 seconds (one day). If the value of\n\t\t\t\tMinTTL
is more than 86400 seconds, then the default value for this\n\t\t\tfield is the same as the value of MinTTL
.
The default amount of time, in seconds, that you want objects to stay in the CloudFront\n\t\t\tcache before CloudFront sends another request to the origin to see if the object has been\n\t\t\tupdated. CloudFront uses this value as the object’s time to live (TTL) only when the origin\n\t\t\tdoes not send Cache-Control
or Expires
\n\t\t\theaders with the object. For more information, see Managing How Long Content Stays in an Edge Cache (Expiration) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
The default value for this field is 86400 seconds (one day). If the value of\n\t\t\t\tMinTTL
is more than 86400 seconds, then the default value for this\n\t\t\tfield is the same as the value of MinTTL
.
The maximum amount of time, in seconds, that objects stay in the CloudFront cache before\n\t\t\tCloudFront sends another request to the origin to see if the object has been updated. CloudFront\n\t\t\tuses this value only when the origin sends Cache-Control
or\n\t\t\t\tExpires
headers with the object. For more information, see Managing How Long Content Stays in an Edge Cache (Expiration) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
The default value for this field is 31536000 seconds (one year). If the value of\n\t\t\t\tMinTTL
or DefaultTTL
is more than 31536000 seconds, then\n\t\t\tthe default value for this field is the same as the value of\n\t\t\tDefaultTTL
.
The maximum amount of time, in seconds, that objects stay in the CloudFront cache before\n\t\t\tCloudFront sends another request to the origin to see if the object has been updated. CloudFront\n\t\t\tuses this value only when the origin sends Cache-Control
or\n\t\t\t\tExpires
headers with the object. For more information, see Managing How Long Content Stays in an Edge Cache (Expiration) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
The default value for this field is 31536000 seconds (one year). If the value of\n\t\t\t\tMinTTL
or DefaultTTL
is more than 31536000 seconds, then\n\t\t\tthe default value for this field is the same as the value of\n\t\t\tDefaultTTL
.
A cache policy configuration.
\nThis configuration determines the following:
\nThe values that CloudFront includes in the cache key. These values can include HTTP\n\t\t\t\t\theaders, cookies, and URL query strings. CloudFront uses the cache key to find an\n\t\t\t\t\tobject in its cache that it can return to the viewer.
\nThe default, minimum, and maximum time to live (TTL) values that you want\n\t\t\t\t\tobjects to stay in the CloudFront cache.
\nThe headers, cookies, and query strings that are included in the cache key are\n\t\t\tautomatically included in requests that CloudFront sends to the origin. CloudFront sends a request\n\t\t\twhen it can't find a valid object in its cache that matches the request's cache key. If\n\t\t\tyou want to send values to the origin but not include them in the\n\t\t\tcache key, use OriginRequestPolicy
.
A cache policy configuration.
\n\t\tThis configuration determines the following:
\n\t\tThe values that CloudFront includes in the cache key. These values can include HTTP\n\t\t\t\t\theaders, cookies, and URL query strings. CloudFront uses the cache key to find an\n\t\t\t\t\tobject in its cache that it can return to the viewer.
\n\t\t\tThe default, minimum, and maximum time to live (TTL) values that you want\n\t\t\t\t\tobjects to stay in the CloudFront cache.
\n\t\t\tThe headers, cookies, and query strings that are included in the cache key are\n\t\t\tautomatically included in requests that CloudFront sends to the origin. CloudFront sends a request\n\t\t\twhen it can’t find a valid object in its cache that matches the request’s cache key. If\n\t\t\tyou want to send values to the origin but not include them in the\n\t\t\tcache key, use OriginRequestPolicy
.
Determines whether any cookies in viewer requests are included in the cache key and\n\t\t\tautomatically included in requests that CloudFront sends to the origin. Valid values\n\t\t\tare:
\n\n none
– Cookies in viewer requests are not included in the cache\n\t\t\t\t\tkey and are not automatically included in requests that CloudFront sends to the\n\t\t\t\t\torigin. Even when this field is set to none
, any cookies that are\n\t\t\t\t\tlisted in an OriginRequestPolicy
\n are included in origin requests.
\n whitelist
– The cookies in viewer requests that are listed in the\n\t\t\t\t\t\tCookieNames
type are included in the cache key and\n\t\t\t\t\tautomatically included in requests that CloudFront sends to the origin.
\n allExcept
– All cookies in viewer requests that are\n\t\t\t\t\t\t\t\n not\n listed in the\n\t\t\t\t\t\tCookieNames
type are included in the cache key and\n\t\t\t\t\tautomatically included in requests that CloudFront sends to the origin.
\n all
– All cookies in viewer requests are included in the cache\n\t\t\t\t\tkey and are automatically included in requests that CloudFront sends to the\n\t\t\t\t\torigin.
Determines whether any cookies in viewer requests are included in the cache key and\n\t\t\tautomatically included in requests that CloudFront sends to the origin. Valid values\n\t\t\tare:
\n\t\t\n none
– Cookies in viewer requests are not included in the cache\n\t\t\t\t\tkey and are not automatically included in requests that CloudFront sends to the\n\t\t\t\t\torigin. Even when this field is set to none
, any cookies that are\n\t\t\t\t\tlisted in an OriginRequestPolicy
\n\t\t\t\t\t are included in origin requests.
\n whitelist
– The cookies in viewer requests that are listed in the\n\t\t\t\t\t\tCookieNames
type are included in the cache key and\n\t\t\t\t\tautomatically included in requests that CloudFront sends to the origin.
\n allExcept
– All cookies in viewer requests that are\n\t\t\t\t\t\t\t\n not\n listed in the\n\t\t\t\t\t\tCookieNames
type are included in the cache key and\n\t\t\t\t\tautomatically included in requests that CloudFront sends to the origin.
\n all
– All cookies in viewer requests are included in the cache\n\t\t\t\t\tkey and are automatically included in requests that CloudFront sends to the\n\t\t\t\t\torigin.
Determines whether any HTTP headers are included in the cache key and automatically\n\t\t\tincluded in requests that CloudFront sends to the origin. Valid values are:
\n\n none
– HTTP headers are not included in the cache key and are not\n\t\t\t\t\tautomatically included in requests that CloudFront sends to the origin. Even when this\n\t\t\t\t\tfield is set to none
, any headers that are listed in an\n\t\t\t\t\t\tOriginRequestPolicy
\n are included in origin requests.
\n whitelist
– The HTTP headers that are listed in the\n\t\t\t\t\t\tHeaders
type are included in the cache key and are\n\t\t\t\t\tautomatically included in requests that CloudFront sends to the origin.
Determines whether any HTTP headers are included in the cache key and automatically\n\t\t\tincluded in requests that CloudFront sends to the origin. Valid values are:
\n\t\t\n none
– HTTP headers are not included in the cache key and are not\n\t\t\t\t\tautomatically included in requests that CloudFront sends to the origin. Even when this\n\t\t\t\t\tfield is set to none
, any headers that are listed in an\n\t\t\t\t\t\tOriginRequestPolicy
\n\t\t\t\t\t are included in origin requests.
\n whitelist
– The HTTP headers that are listed in the\n\t\t\t\t\t\tHeaders
type are included in the cache key and are\n\t\t\t\t\tautomatically included in requests that CloudFront sends to the origin.
Determines whether any URL query strings in viewer requests are included in the cache\n\t\t\tkey and automatically included in requests that CloudFront sends to the origin. Valid values\n\t\t\tare:
\n\n none
– Query strings in viewer requests are not included in the\n\t\t\t\t\tcache key and are not automatically included in requests that CloudFront sends to the\n\t\t\t\t\torigin. Even when this field is set to none
, any query strings that\n\t\t\t\t\tare listed in an OriginRequestPolicy
\n are included in origin requests.
\n whitelist
– The query strings in viewer requests that are listed\n\t\t\t\t\tin the QueryStringNames
type are included in the cache key and\n\t\t\t\t\tautomatically included in requests that CloudFront sends to the origin.
\n allExcept
– All query strings in viewer requests that are\n\t\t\t\t\t\t\t\n not\n listed in the\n\t\t\t\t\t\tQueryStringNames
type are included in the cache key and\n\t\t\t\t\tautomatically included in requests that CloudFront sends to the origin.
\n all
– All query strings in viewer requests are included in the\n\t\t\t\t\tcache key and are automatically included in requests that CloudFront sends to the\n\t\t\t\t\torigin.
Determines whether any URL query strings in viewer requests are included in the cache\n\t\t\tkey and automatically included in requests that CloudFront sends to the origin. Valid values\n\t\t\tare:
\n\t\t\n none
– Query strings in viewer requests are not included in the\n\t\t\t\t\tcache key and are not automatically included in requests that CloudFront sends to the\n\t\t\t\t\torigin. Even when this field is set to none
, any query strings that\n\t\t\t\t\tare listed in an OriginRequestPolicy
\n\t\t\t\t\t are included in origin requests.
\n whitelist
– The query strings in viewer requests that are listed\n\t\t\t\t\tin the QueryStringNames
type are included in the cache key and\n\t\t\t\t\tautomatically included in requests that CloudFront sends to the origin.
\n allExcept
– All query strings in viewer requests that are\n\t\t\t\t\t\t\t\n not\n listed in the\n\t\t\t\t\t\tQueryStringNames
type are included in the cache key and\n\t\t\t\t\tautomatically included in requests that CloudFront sends to the origin.
\n all
– All query strings in viewer requests are included in the\n\t\t\t\t\tcache key and are automatically included in requests that CloudFront sends to the\n\t\t\t\t\torigin.
A complex type that controls whether CloudFront caches the response to requests using the\n\t\t\tspecified HTTP methods. There are two choices:
\nCloudFront caches responses to GET
and HEAD
\n\t\t\t\t\trequests.
CloudFront caches responses to GET
, HEAD
, and\n\t\t\t\t\t\tOPTIONS
requests.
If you pick the second choice for your Amazon S3 Origin, you may need to forward\n\t\t\tAccess-Control-Request-Method, Access-Control-Request-Headers, and Origin headers for\n\t\t\tthe responses to be cached correctly.
" + "smithy.api#documentation": "A complex type that controls whether CloudFront caches the response to requests using the\n\t\t\tspecified HTTP methods. There are two choices:
\n\t\tCloudFront caches responses to GET
and HEAD
\n\t\t\t\t\trequests.
CloudFront caches responses to GET
, HEAD
, and\n\t\t\t\t\t\tOPTIONS
requests.
If you pick the second choice for your Amazon S3 Origin, you may need to forward\n\t\t\tAccess-Control-Request-Method, Access-Control-Request-Headers, and Origin headers for\n\t\t\tthe responses to be cached correctly.
" } }, "com.amazonaws.cloudfront#CannotChangeImmutablePublicKeyFields": { @@ -850,14 +850,14 @@ "S3CanonicalUserId": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The Amazon S3 canonical user ID for the origin access identity, used when giving the origin\n\t\t\taccess identity read permission to an object in Amazon S3.
", + "smithy.api#documentation": "The Amazon S3 canonical user ID for the origin access identity, used when giving the origin\n\t\t\taccess identity read permission to an object in Amazon S3.
", "smithy.api#required": {} } }, "CloudFrontOriginAccessIdentityConfig": { "target": "com.amazonaws.cloudfront#CloudFrontOriginAccessIdentityConfig", "traits": { - "smithy.api#documentation": "The current configuration information for the identity.
" + "smithy.api#documentation": "The current configuration information for the identity.
" } } }, @@ -884,7 +884,7 @@ "CallerReference": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "A unique value (for example, a date-time stamp) that ensures that the request can't be\n\t\t\treplayed.
\nIf the value of CallerReference
is new (regardless of the content of the\n\t\t\t\tCloudFrontOriginAccessIdentityConfig
object), a new origin access\n\t\t\tidentity is created.
If the CallerReference
is a value already sent in a previous identity\n\t\t\trequest, and the content of the CloudFrontOriginAccessIdentityConfig
is\n\t\t\tidentical to the original request (ignoring white space), the response includes the same\n\t\t\tinformation returned to the original request.
If the CallerReference
is a value you already sent in a previous request\n\t\t\tto create an identity, but the content of the\n\t\t\t\tCloudFrontOriginAccessIdentityConfig
is different from the original\n\t\t\trequest, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists
error.\n\t\t
A unique value (for example, a date-time stamp) that ensures that the request can't be\n\t\t\treplayed.
\n\t\tIf the value of CallerReference
is new (regardless of the content of the\n\t\t\t\tCloudFrontOriginAccessIdentityConfig
object), a new origin access\n\t\t\tidentity is created.
If the CallerReference
is a value already sent in a previous identity\n\t\t\trequest, and the content of the CloudFrontOriginAccessIdentityConfig
is\n\t\t\tidentical to the original request (ignoring white space), the response includes the same\n\t\t\tinformation returned to the original request.
If the CallerReference
is a value you already sent in a previous request\n\t\t\tto create an identity, but the content of the\n\t\t\t\tCloudFrontOriginAccessIdentityConfig
is different from the original\n\t\t\trequest, CloudFront returns a CloudFrontOriginAccessIdentityAlreadyExists
error.\n\t\t
Origin access identity configuration. Send a GET
request to the\n\t\t\t\t\t/CloudFront API version/CloudFront/identity ID/config
\n\t\t\tresource.
Origin access identity configuration. Send a GET
request to the\n\t\t\t\t\t/CloudFront API version/CloudFront/identity ID/config
\n\t\t\tresource.
Use this when paginating results to indicate where to begin in your list of origin\n\t\t\taccess identities. The results include identities in the list that occur after the\n\t\t\tmarker. To get the next page of results, set the Marker
to the value of the\n\t\t\t\tNextMarker
from the current page's response (which is also the ID of\n\t\t\tthe last identity on that page).
Use this when paginating results to indicate where to begin in your list of origin\n\t\t\taccess identities. The results include identities in the list that occur after the\n\t\t\tmarker. To get the next page of results, set the Marker
to the value of the\n\t\t\t\tNextMarker
from the current page's response (which is also the ID of\n\t\t\tthe last identity on that page).
If IsTruncated
is true
, this element is present and contains\n\t\t\tthe value you can use for the Marker
request parameter to continue listing\n\t\t\tyour origin access identities where they left off.
If IsTruncated
is true
, this element is present and contains\n\t\t\tthe value you can use for the Marker
request parameter to continue listing\n\t\t\tyour origin access identities where they left off.
The maximum number of origin access identities you want in the response body.
", + "smithy.api#documentation": "The maximum number of origin access identities you want in the response body.
", "smithy.api#required": {} } }, @@ -1334,7 +1334,7 @@ "name": "cloudfront" }, "aws.protocols#restXml": {}, - "smithy.api#documentation": "This is the Amazon CloudFront API Reference. This guide is for developers\n\t\t\twho need detailed information about CloudFront API actions, data types, and errors. For\n\t\t\tdetailed information about CloudFront features, see the\n\t\t\tAmazon CloudFront Developer Guide.
", + "smithy.api#documentation": "This is the Amazon CloudFront API Reference. This guide is for developers\n\t\t\twho need detailed information about CloudFront API actions, data types, and errors. For\n\t\t\tdetailed information about CloudFront features, see the\n\t\t\tAmazon CloudFront Developer Guide.
", "smithy.api#title": "Amazon CloudFront", "smithy.api#xmlNamespace": { "uri": "http://cloudfront.amazonaws.com/doc/2020-05-31/" @@ -2200,9 +2200,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "cn-northwest-1", + "name": "sigv4", "signingName": "cloudfront", - "name": "sigv4" + "signingRegion": "cn-northwest-1" } ] }, @@ -2211,8 +2211,8 @@ }, "params": { "UseFIPS": false, - "Region": "aws-cn-global", - "UseDualStack": false + "UseDualStack": false, + "Region": "aws-cn-global" } }, { @@ -2222,9 +2222,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", + "name": "sigv4", "signingName": "cloudfront", - "name": "sigv4" + "signingRegion": "us-east-1" } ] }, @@ -2233,8 +2233,8 @@ }, "params": { "UseFIPS": false, - "Region": "aws-global", - "UseDualStack": false + "UseDualStack": false, + "Region": "aws-global" } }, { @@ -2246,8 +2246,8 @@ }, "params": { "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false, + "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -2258,8 +2258,8 @@ }, "params": { "UseFIPS": true, - "Region": "us-east-1", "UseDualStack": false, + "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -2270,8 +2270,8 @@ }, "params": { "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": true, + "Region": "us-east-1", "Endpoint": "https://example.com" } } @@ -2304,12 +2304,12 @@ "AccountId": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The (partially hidden) ID of the Amazon Web Services account that owns the distribution that's\n\t\t\tassociated with the alias.
" + "smithy.api#documentation": "The (partially hidden) ID of the Amazon Web Services account that owns the distribution that’s\n\t\t\tassociated with the alias.
" } } }, "traits": { - "smithy.api#documentation": "An alias (also called a CNAME) and the CloudFront distribution and Amazon Web Services account ID that\n\t\t\tit's associated with. The distribution and account IDs are partially hidden, which\n\t\t\tallows you to identify the distributions and accounts that you own, but helps to protect\n\t\t\tthe information of ones that you don't own.
" + "smithy.api#documentation": "An alias (also called a CNAME) and the CloudFront distribution and Amazon Web Services account ID that\n\t\t\tit’s associated with. The distribution and account IDs are partially hidden, which\n\t\t\tallows you to identify the distributions and accounts that you own, but helps to protect\n\t\t\tthe information of ones that you don’t own.
" } }, "com.amazonaws.cloudfront#ConflictingAliases": { @@ -2350,7 +2350,7 @@ } }, "traits": { - "smithy.api#documentation": "A list of aliases (also called CNAMEs) and the CloudFront distributions and Amazon Web Services accounts\n\t\t\tthat they are associated with. In the list, the distribution and account IDs are\n\t\t\tpartially hidden, which allows you to identify the distributions and accounts that you\n\t\t\town, but helps to protect the information of ones that you don't own.
" + "smithy.api#documentation": "A list of aliases (also called CNAMEs) and the CloudFront distributions and Amazon Web Services accounts\n\t\t\tthat they are associated with. In the list, the distribution and account IDs are\n\t\t\tpartially hidden, which allows you to identify the distributions and accounts that you\n\t\t\town, but helps to protect the information of ones that you don’t own.
" } }, "com.amazonaws.cloudfront#ContentTypeProfile": { @@ -2359,26 +2359,26 @@ "Format": { "target": "com.amazonaws.cloudfront#Format", "traits": { - "smithy.api#documentation": "The format for a field-level encryption content type-profile mapping.
", + "smithy.api#documentation": "The format for a field-level encryption content type-profile mapping.
", "smithy.api#required": {} } }, "ProfileId": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The profile ID for a field-level encryption content type-profile mapping.
" + "smithy.api#documentation": "The profile ID for a field-level encryption content type-profile mapping.
" } }, "ContentType": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The content type for a field-level encryption content type-profile mapping.
", + "smithy.api#documentation": "The content type for a field-level encryption content type-profile mapping.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "A field-level encryption content type profile.
" + "smithy.api#documentation": "A field-level encryption content type profile.
" } }, "com.amazonaws.cloudfront#ContentTypeProfileConfig": { @@ -2387,19 +2387,19 @@ "ForwardWhenContentTypeIsUnknown": { "target": "com.amazonaws.cloudfront#boolean", "traits": { - "smithy.api#documentation": "The setting in a field-level encryption content type-profile mapping that specifies\n\t\t\twhat to do when an unknown content type is provided for the profile. If true, content is\n\t\t\tforwarded without being encrypted when the content type is unknown. If false (the\n\t\t\tdefault), an error is returned when the content type is unknown.
", + "smithy.api#documentation": "The setting in a field-level encryption content type-profile mapping that specifies\n\t\t\twhat to do when an unknown content type is provided for the profile. If true, content is\n\t\t\tforwarded without being encrypted when the content type is unknown. If false (the\n\t\t\tdefault), an error is returned when the content type is unknown.
", "smithy.api#required": {} } }, "ContentTypeProfiles": { "target": "com.amazonaws.cloudfront#ContentTypeProfiles", "traits": { - "smithy.api#documentation": "The configuration for a field-level encryption content type-profile.
" + "smithy.api#documentation": "The configuration for a field-level encryption content type-profile.
" } } }, "traits": { - "smithy.api#documentation": "The configuration for a field-level encryption content type-profile mapping.
" + "smithy.api#documentation": "The configuration for a field-level encryption content type-profile mapping.
" } }, "com.amazonaws.cloudfront#ContentTypeProfileList": { @@ -2417,19 +2417,19 @@ "Quantity": { "target": "com.amazonaws.cloudfront#integer", "traits": { - "smithy.api#documentation": "The number of field-level encryption content type-profile mappings.
", + "smithy.api#documentation": "The number of field-level encryption content type-profile mappings.
", "smithy.api#required": {} } }, "Items": { "target": "com.amazonaws.cloudfront#ContentTypeProfileList", "traits": { - "smithy.api#documentation": "Items in a field-level encryption content type-profile mapping.
" + "smithy.api#documentation": "Items in a field-level encryption content type-profile mapping.
" } } }, "traits": { - "smithy.api#documentation": "Field-level encryption content type-profile.
" + "smithy.api#documentation": "Field-level encryption content type-profile.
" } }, "com.amazonaws.cloudfront#ContinuousDeploymentPolicy": { @@ -2595,7 +2595,7 @@ "Header": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The request header name that you want CloudFront to send to your staging\n\t\t\tdistribution. The header must contain the prefix aws-cf-cd-
.
The request header name that you want CloudFront to send to your staging\n\t\t\tdistribution.
", "smithy.api#required": {} } }, @@ -2665,19 +2665,19 @@ "Forward": { "target": "com.amazonaws.cloudfront#ItemSelection", "traits": { - "smithy.api#documentation": "This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field.
\nIf you want to include cookies in the cache key, use a cache policy. For more\n\t\t\tinformation, see Creating cache policies in the Amazon CloudFront Developer Guide.
\nIf you want to send cookies to the origin but not include them in the cache key, use\n\t\t\torigin request policy. For more information, see Creating origin request policies in the\n\t\t\tAmazon CloudFront Developer Guide.
\nSpecifies which cookies to forward to the origin for this cache behavior: all, none,\n\t\t\tor the list of cookies specified in the WhitelistedNames
complex\n\t\t\ttype.
Amazon S3 doesn't process cookies. When the cache behavior is forwarding requests to an\n\t\t\tAmazon S3 origin, specify none for the Forward
element.
This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field.
\n\t\tIf you want to include cookies in the cache key, use a cache policy. For more\n\t\t\tinformation, see Creating cache policies in the Amazon CloudFront Developer Guide.
\n\t\tIf you want to send cookies to the origin but not include them in the cache key, use\n\t\t\torigin request policy. For more information, see Creating origin request policies in the\n\t\t\tAmazon CloudFront Developer Guide.
\n\t\tSpecifies which cookies to forward to the origin for this cache behavior: all, none,\n\t\t\tor the list of cookies specified in the WhitelistedNames
complex\n\t\t\ttype.
Amazon S3 doesn't process cookies. When the cache behavior is forwarding requests to an\n\t\t\tAmazon S3 origin, specify none for the Forward
element.
This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field.
\nIf you want to include cookies in the cache key, use a cache policy. For more\n\t\t\tinformation, see Creating cache policies in the Amazon CloudFront Developer Guide.
\nIf you want to send cookies to the origin but not include them in the cache key, use\n\t\t\tan origin request policy. For more information, see Creating origin request policies in the\n\t\t\tAmazon CloudFront Developer Guide.
\nRequired if you specify whitelist
for the value of Forward
.\n\t\t\tA complex type that specifies how many different cookies you want CloudFront to forward to the\n\t\t\torigin for this cache behavior and, if you want to forward selected cookies, the names\n\t\t\tof those cookies.
If you specify all
or none
for the value of\n\t\t\t\tForward
, omit WhitelistedNames
. If you change the value of\n\t\t\t\tForward
from whitelist
to all
or\n\t\t\t\tnone
and you don't delete the WhitelistedNames
element and\n\t\t\tits child elements, CloudFront deletes them automatically.
For the current limit on the number of cookie names that you can whitelist for each\n\t\t\tcache behavior, see CloudFront\n\t\t\t\tLimits in the Amazon Web Services General Reference.
" + "smithy.api#documentation": "This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field.
\n\t\tIf you want to include cookies in the cache key, use a cache policy. For more\n\t\t\tinformation, see Creating cache policies in the Amazon CloudFront Developer Guide.
\n\t\tIf you want to send cookies to the origin but not include them in the cache key, use\n\t\t\tan origin request policy. For more information, see Creating origin request policies in the\n\t\t\tAmazon CloudFront Developer Guide.
\n\t\tRequired if you specify whitelist
for the value of Forward
.\n\t\t\tA complex type that specifies how many different cookies you want CloudFront to forward to the\n\t\t\torigin for this cache behavior and, if you want to forward selected cookies, the names\n\t\t\tof those cookies.
If you specify all
or none
for the value of\n\t\t\t\tForward
, omit WhitelistedNames
. If you change the value of\n\t\t\t\tForward
from whitelist
to all
or\n\t\t\t\tnone
and you don't delete the WhitelistedNames
element and\n\t\t\tits child elements, CloudFront deletes them automatically.
For the current limit on the number of cookie names that you can whitelist for each\n\t\t\tcache behavior, see CloudFront\n\t\t\t\tLimits in the Amazon Web Services General Reference.
" } } }, "traits": { - "smithy.api#documentation": "This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field.
\nIf you want to include cookies in the cache key, use CookiesConfig
in a\n\t\t\tcache policy. See CachePolicy
.
If you want to send cookies to the origin but not include them in the cache key, use\n\t\t\t\tCookiesConfig
in an origin request policy. See\n\t\t\t\tOriginRequestPolicy
.
A complex type that specifies whether you want CloudFront to forward cookies to the origin\n\t\t\tand, if so, which ones. For more information about forwarding cookies to the origin, see\n\t\t\t\tCaching Content Based on\n\t\t\t\tCookies in the Amazon CloudFront Developer Guide.
" + "smithy.api#documentation": "This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field.
\n\t\tIf you want to include cookies in the cache key, use CookiesConfig
in a\n\t\t\tcache policy. See CachePolicy
.
If you want to send cookies to the origin but not include them in the cache key, use\n\t\t\t\tCookiesConfig
in an origin request policy. See\n\t\t\t\tOriginRequestPolicy
.
A complex type that specifies whether you want CloudFront to forward cookies to the origin\n\t\t\tand, if so, which ones. For more information about forwarding cookies to the origin, see\n\t\t\t\tCaching Content Based on\n\t\t\t\tCookies in the Amazon CloudFront Developer Guide.
" } }, "com.amazonaws.cloudfront#CopyDistribution": { @@ -2880,7 +2880,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a staging distribution using the configuration of the provided primary\n\t\t\tdistribution. A staging distribution is a copy of an existing distribution (called the\n\t\t\tprimary distribution) that you can use in a continuous deployment workflow.
\nAfter you create a staging distribution, you can use UpdateDistribution
\n\t\t\tto modify the staging distribution's configuration. Then you can use\n\t\t\t\tCreateContinuousDeploymentPolicy
to incrementally move traffic to the\n\t\t\tstaging distribution.
Creates a staging distribution using the configuration of the provided primary\n\t\t\tdistribution. A staging distribution is a copy of an existing distribution (called the\n\t\t\tprimary distribution) that you can use in a continuous deployment workflow.
\n\t\tAfter you create a staging distribution, you can use UpdateDistribution
\n\t\t\tto modify the staging distribution’s configuration. Then you can use\n\t\t\t\tCreateContinuousDeploymentPolicy
to incrementally move traffic to the\n\t\t\tstaging distribution.
Creates a cache policy.
\nAfter you create a cache policy, you can attach it to one or more cache behaviors.\n\t\t\tWhen it's attached to a cache behavior, the cache policy determines the\n\t\t\tfollowing:
\nThe values that CloudFront includes in the cache key. These\n\t\t\t\t\tvalues can include HTTP headers, cookies, and URL query strings. CloudFront uses the\n\t\t\t\t\tcache key to find an object in its cache that it can return to the\n\t\t\t\t\tviewer.
\nThe default, minimum, and maximum time to live (TTL) values that you want\n\t\t\t\t\tobjects to stay in the CloudFront cache.
\nThe headers, cookies, and query strings that are included in the cache key are\n\t\t\tautomatically included in requests that CloudFront sends to the origin. CloudFront sends a request\n\t\t\twhen it can't find an object in its cache that matches the request's cache key. If you\n\t\t\twant to send values to the origin but not include them in the cache\n\t\t\tkey, use OriginRequestPolicy
.
For more information about cache policies, see Controlling the cache key in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", + "smithy.api#documentation": "Creates a cache policy.
\n\t\tAfter you create a cache policy, you can attach it to one or more cache behaviors.\n\t\t\tWhen it’s attached to a cache behavior, the cache policy determines the\n\t\t\tfollowing:
\n\t\tThe values that CloudFront includes in the cache key. These\n\t\t\t\t\tvalues can include HTTP headers, cookies, and URL query strings. CloudFront uses the\n\t\t\t\t\tcache key to find an object in its cache that it can return to the\n\t\t\t\t\tviewer.
\n\t\t\tThe default, minimum, and maximum time to live (TTL) values that you want\n\t\t\t\t\tobjects to stay in the CloudFront cache.
\n\t\t\tThe headers, cookies, and query strings that are included in the cache key are\n\t\t\tautomatically included in requests that CloudFront sends to the origin. CloudFront sends a request\n\t\t\twhen it can’t find an object in its cache that matches the request’s cache key. If you\n\t\t\twant to send values to the origin but not include them in the cache\n\t\t\tkey, use OriginRequestPolicy
.
For more information about cache policies, see Controlling the cache key in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", "smithy.api#http": { "method": "POST", "uri": "/2020-05-31/cache-policy", @@ -3078,7 +3078,7 @@ } }, "traits": { - "smithy.api#documentation": "The request to create a new origin access identity (OAI). An origin access identity is\n\t\t\ta special CloudFront user that you can associate with Amazon S3 origins, so that you can secure all\n\t\t\tor just some of your Amazon S3 content. For more information, see Restricting Access to Amazon S3 Content by Using an Origin Access Identity in\n\t\t\tthe Amazon CloudFront Developer Guide.
" + "smithy.api#documentation": "The request to create a new origin access identity (OAI). An origin access identity is\n\t\t\ta special CloudFront user that you can associate with Amazon S3 origins, so that you can secure all\n\t\t\tor just some of your Amazon S3 content. For more information, see Restricting Access to Amazon S3 Content by Using an Origin Access Identity in\n\t\t\tthe Amazon CloudFront Developer Guide.
" } }, "com.amazonaws.cloudfront#CreateCloudFrontOriginAccessIdentityResult": { @@ -3139,7 +3139,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a continuous deployment policy that distributes traffic for a custom domain\n\t\t\tname to two different CloudFront distributions.
\nTo use a continuous deployment policy, first use CopyDistribution
to\n\t\t\tcreate a staging distribution, then use UpdateDistribution
to modify the\n\t\t\tstaging distribution's configuration.
After you create and update a staging distribution, you can use a continuous\n\t\t\tdeployment policy to incrementally move traffic to the staging distribution. This\n\t\t\tworkflow enables you to test changes to a distribution's configuration before moving all\n\t\t\tof your domain's production traffic to the new configuration.
", + "smithy.api#documentation": "Creates a continuous deployment policy that distributes traffic for a custom domain\n\t\t\tname to two different CloudFront distributions.
\n\t\tTo use a continuous deployment policy, first use CopyDistribution
to\n\t\t\tcreate a staging distribution, then use UpdateDistribution
to modify the\n\t\t\tstaging distribution’s configuration.
After you create and update a staging distribution, you can use a continuous\n\t\t\tdeployment policy to incrementally move traffic to the staging distribution. This\n\t\t\tworkflow enables you to test changes to a distribution’s configuration before moving all\n\t\t\tof your domain’s production traffic to the new configuration.
", "smithy.api#http": { "method": "POST", "uri": "/2020-05-31/continuous-deployment-policy", @@ -3664,7 +3664,7 @@ "DistributionConfigWithTags": { "target": "com.amazonaws.cloudfront#DistributionConfigWithTags", "traits": { - "smithy.api#documentation": "The distribution's configuration information.
", + "smithy.api#documentation": "The distribution's configuration information.
", "smithy.api#httpPayload": {}, "smithy.api#required": {}, "smithy.api#xmlName": "DistributionConfigWithTags" @@ -3672,7 +3672,7 @@ } }, "traits": { - "smithy.api#documentation": "The request to create a new distribution with tags.
" + "smithy.api#documentation": "The request to create a new distribution with tags.
" } }, "com.amazonaws.cloudfront#CreateDistributionWithTagsResult": { @@ -3681,7 +3681,7 @@ "Distribution": { "target": "com.amazonaws.cloudfront#Distribution", "traits": { - "smithy.api#documentation": "The distribution's information.
", + "smithy.api#documentation": "The distribution's information.
", "smithy.api#httpPayload": {} } }, @@ -3701,7 +3701,7 @@ } }, "traits": { - "smithy.api#documentation": "The returned result of the corresponding request.
" + "smithy.api#documentation": "The returned result of the corresponding request.
" } }, "com.amazonaws.cloudfront#CreateFieldLevelEncryptionConfig": { @@ -3896,7 +3896,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a CloudFront function.
\nTo create a function, you provide the function code and some configuration information\n\t\t\tabout the function. The response contains an Amazon Resource Name (ARN) that uniquely\n\t\t\tidentifies the function.
\nWhen you create a function, it's in the DEVELOPMENT
stage. In this stage,\n\t\t\tyou can test the function with TestFunction
, and update it with\n\t\t\t\tUpdateFunction
.
When you're ready to use your function with a CloudFront distribution, use\n\t\t\t\tPublishFunction
to copy the function from the DEVELOPMENT
\n\t\t\tstage to LIVE
. When it's live, you can attach the function to a\n\t\t\tdistribution's cache behavior, using the function's ARN.
Creates a CloudFront function.
\n\t\tTo create a function, you provide the function code and some configuration information\n\t\t\tabout the function. The response contains an Amazon Resource Name (ARN) that uniquely\n\t\t\tidentifies the function.
\n\t\tWhen you create a function, it’s in the DEVELOPMENT
stage. In this stage,\n\t\t\tyou can test the function with TestFunction
, and update it with\n\t\t\t\tUpdateFunction
.
When you’re ready to use your function with a CloudFront distribution, use\n\t\t\t\tPublishFunction
to copy the function from the DEVELOPMENT
\n\t\t\tstage to LIVE
. When it’s live, you can attach the function to a\n\t\t\tdistribution’s cache behavior, using the function’s ARN.
Configuration information about the function, including an optional comment and the\n\t\t\tfunction's runtime.
", + "smithy.api#documentation": "Configuration information about the function, including an optional comment and the\n\t\t\tfunction’s runtime.
", "smithy.api#required": {} } }, @@ -3988,7 +3988,7 @@ } ], "traits": { - "smithy.api#documentation": "Create a new invalidation.
", + "smithy.api#documentation": "Create a new invalidation.
", "smithy.api#http": { "method": "POST", "uri": "/2020-05-31/distribution/{DistributionId}/invalidation", @@ -4066,7 +4066,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a key group that you can use with CloudFront signed URLs and signed cookies.
\nTo create a key group, you must specify at least one public key for the key group.\n\t\t\tAfter you create a key group, you can reference it from one or more cache behaviors.\n\t\t\tWhen you reference a key group in a cache behavior, CloudFront requires signed URLs or signed\n\t\t\tcookies for all requests that match the cache behavior. The URLs or cookies must be\n\t\t\tsigned with a private key whose corresponding public key is in the key group. The signed\n\t\t\tURL or cookie contains information about which public key CloudFront should use to verify the\n\t\t\tsignature. For more information, see Serving private content in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", + "smithy.api#documentation": "Creates a key group that you can use with CloudFront signed URLs and signed cookies.
\n\t\tTo create a key group, you must specify at least one public key for the key group.\n\t\t\tAfter you create a key group, you can reference it from one or more cache behaviors.\n\t\t\tWhen you reference a key group in a cache behavior, CloudFront requires signed URLs or signed\n\t\t\tcookies for all requests that match the cache behavior. The URLs or cookies must be\n\t\t\tsigned with a private key whose corresponding public key is in the key group. The signed\n\t\t\tURL or cookie contains information about which public key CloudFront should use to verify the\n\t\t\tsignature. For more information, see Serving private content in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", "smithy.api#http": { "method": "POST", "uri": "/2020-05-31/key-group", @@ -4137,7 +4137,7 @@ } ], "traits": { - "smithy.api#documentation": "Enables additional CloudWatch metrics for the specified CloudFront distribution. The\n\t\t\tadditional metrics incur an additional cost.
\nFor more information, see Viewing additional CloudFront distribution metrics in\n\t\t\tthe Amazon CloudFront Developer Guide.
", + "smithy.api#documentation": "Enables additional CloudWatch metrics for the specified CloudFront distribution. The\n\t\t\tadditional metrics incur an additional cost.
\n\t\tFor more information, see Viewing additional CloudFront distribution metrics in\n\t\t\tthe Amazon CloudFront Developer Guide.
", "smithy.api#http": { "method": "POST", "uri": "/2020-05-31/distributions/{DistributionId}/monitoring-subscription", @@ -4199,7 +4199,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a new origin access control in CloudFront. After you create an origin access\n\t\t\tcontrol, you can add it to an origin in a CloudFront distribution so that CloudFront sends\n\t\t\tauthenticated (signed) requests to the origin.
\nFor an Amazon S3 origin, this makes it possible to block public access to the Amazon S3 bucket\n\t\t\tso that viewers (users) can access the content in the bucket only through CloudFront.
\nFor more information about using a CloudFront origin access control, see Restricting access to an Amazon S3 origin in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", + "smithy.api#documentation": "Creates a new origin access control in CloudFront. After you create an origin access\n\t\t\tcontrol, you can add it to an origin in a CloudFront distribution so that CloudFront sends\n\t\t\tauthenticated (signed) requests to the origin.
\n\t\tFor an Amazon S3 origin, this makes it possible to block public access to the Amazon S3 bucket\n\t\t\tso that viewers (users) can access the content in the bucket only through CloudFront.
\n\t\tFor more information about using a CloudFront origin access control, see Restricting access to an Amazon S3 origin in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", "smithy.api#http": { "method": "POST", "uri": "/2020-05-31/origin-access-control", @@ -4282,7 +4282,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates an origin request policy.
\nAfter you create an origin request policy, you can attach it to one or more cache\n\t\t\tbehaviors. When it's attached to a cache behavior, the origin request policy determines\n\t\t\tthe values that CloudFront includes in requests that it sends to the origin. Each request that\n\t\t\tCloudFront sends to the origin includes the following:
\nThe request body and the URL path (without the domain name) from the viewer\n\t\t\t\t\trequest.
\nThe headers that CloudFront automatically includes in every origin request,\n\t\t\t\t\tincluding Host
, User-Agent
, and\n\t\t\t\t\t\tX-Amz-Cf-Id
.
All HTTP headers, cookies, and URL query strings that are specified in the\n\t\t\t\t\tcache policy or the origin request policy. These can include items from the\n\t\t\t\t\tviewer request and, in the case of headers, additional ones that are added by\n\t\t\t\t\tCloudFront.
\nCloudFront sends a request when it can't find a valid object in its cache that matches the\n\t\t\trequest. If you want to send values to the origin and also include them in the cache\n\t\t\tkey, use CachePolicy
.
For more information about origin request policies, see Controlling origin requests in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", + "smithy.api#documentation": "Creates an origin request policy.
\n\t\tAfter you create an origin request policy, you can attach it to one or more cache\n\t\t\tbehaviors. When it’s attached to a cache behavior, the origin request policy determines\n\t\t\tthe values that CloudFront includes in requests that it sends to the origin. Each request that\n\t\t\tCloudFront sends to the origin includes the following:
\n\t\tThe request body and the URL path (without the domain name) from the viewer\n\t\t\t\t\trequest.
\n\t\t\tThe headers that CloudFront automatically includes in every origin request,\n\t\t\t\t\tincluding Host
, User-Agent
, and\n\t\t\t\t\t\tX-Amz-Cf-Id
.
All HTTP headers, cookies, and URL query strings that are specified in the\n\t\t\t\t\tcache policy or the origin request policy. These can include items from the\n\t\t\t\t\tviewer request and, in the case of headers, additional ones that are added by\n\t\t\t\t\tCloudFront.
\n\t\t\tCloudFront sends a request when it can’t find a valid object in its cache that matches the\n\t\t\trequest. If you want to send values to the origin and also include them in the cache\n\t\t\tkey, use CachePolicy
.
For more information about origin request policies, see Controlling origin requests in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", "smithy.api#http": { "method": "POST", "uri": "/2020-05-31/origin-request-policy", @@ -4421,7 +4421,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a real-time log configuration.
\nAfter you create a real-time log configuration, you can attach it to one or more cache\n\t\t\tbehaviors to send real-time log data to the specified Amazon Kinesis data stream.
\nFor more information about real-time log configurations, see Real-time logs in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", + "smithy.api#documentation": "Creates a real-time log configuration.
\n\t\tAfter you create a real-time log configuration, you can attach it to one or more cache\n\t\t\tbehaviors to send real-time log data to the specified Amazon Kinesis data stream.
\n\t\tFor more information about real-time log configurations, see Real-time logs in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", "smithy.api#http": { "method": "POST", "uri": "/2020-05-31/realtime-log-config", @@ -4442,7 +4442,7 @@ "Fields": { "target": "com.amazonaws.cloudfront#FieldList", "traits": { - "smithy.api#documentation": "A list of fields to include in each real-time log record.
\nFor more information about fields, see Real-time log configuration fields in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", + "smithy.api#documentation": "A list of fields to include in each real-time log record.
\n\t\tFor more information about fields, see Real-time log configuration fields in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", "smithy.api#required": {} } }, @@ -4505,7 +4505,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a response headers policy.
\nA response headers policy contains information about a set of HTTP response headers\n\t\t\tand their values. To create a response headers policy, you provide some metadata about\n\t\t\tthe policy, and a set of configurations that specify the response headers.
\nAfter you create a response headers policy, you can use its ID to attach it to one or\n\t\t\tmore cache behaviors in a CloudFront distribution. When it's attached to a cache behavior,\n\t\t\tCloudFront adds the headers in the policy to HTTP responses that it sends for requests that\n\t\t\tmatch the cache behavior.
", + "smithy.api#documentation": "Creates a response headers policy.
\n\t\tA response headers policy contains information about a set of HTTP response headers\n\t\t\tand their values. To create a response headers policy, you provide some metadata about\n\t\t\tthe policy, and a set of configurations that specify the response headers.
\n\t\tAfter you create a response headers policy, you can use its ID to attach it to one or\n\t\t\tmore cache behaviors in a CloudFront distribution. When it’s attached to a cache behavior,\n\t\t\tCloudFront adds the headers in the policy to HTTP responses that it sends for requests that\n\t\t\tmatch the cache behavior.
", "smithy.api#http": { "method": "POST", "uri": "/2020-05-31/response-headers-policy", @@ -4724,7 +4724,7 @@ "StreamingDistributionConfigWithTags": { "target": "com.amazonaws.cloudfront#StreamingDistributionConfigWithTags", "traits": { - "smithy.api#documentation": "The streaming distribution's configuration information.
", + "smithy.api#documentation": "The streaming distribution's configuration information.
", "smithy.api#httpPayload": {}, "smithy.api#required": {}, "smithy.api#xmlName": "StreamingDistributionConfigWithTags" @@ -4741,7 +4741,7 @@ "StreamingDistribution": { "target": "com.amazonaws.cloudfront#StreamingDistribution", "traits": { - "smithy.api#documentation": "The streaming distribution's information.
", + "smithy.api#documentation": "The streaming distribution's information.
", "smithy.api#httpPayload": {} } }, @@ -4761,7 +4761,7 @@ } }, "traits": { - "smithy.api#documentation": "The returned result of the corresponding request.
" + "smithy.api#documentation": "The returned result of the corresponding request.
" } }, "com.amazonaws.cloudfront#CustomErrorResponse": { @@ -4777,24 +4777,24 @@ "ResponsePagePath": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The path to the custom error page that you want CloudFront to return to a viewer when your\n\t\t\torigin returns the HTTP status code specified by ErrorCode
, for example,\n\t\t\t\t/4xx-errors/403-forbidden.html
. If you want to store your objects and\n\t\t\tyour custom error pages in different locations, your distribution must include a cache\n\t\t\tbehavior for which the following is true:
The value of PathPattern
matches the path to your custom error\n\t\t\t\t\tmessages. For example, suppose you saved custom error pages for 4xx errors in an\n\t\t\t\t\tAmazon S3 bucket in a directory named /4xx-errors
. Your distribution\n\t\t\t\t\tmust include a cache behavior for which the path pattern routes requests for\n\t\t\t\t\tyour custom error pages to that location, for example,\n\t\t\t\t\t\t/4xx-errors/*
.
The value of TargetOriginId
specifies the value of the\n\t\t\t\t\t\tID
element for the origin that contains your custom error\n\t\t\t\t\tpages.
If you specify a value for ResponsePagePath
, you must also specify a\n\t\t\tvalue for ResponseCode
.
We recommend that you store custom error pages in an Amazon S3 bucket. If you store custom\n\t\t\terror pages on an HTTP server and the server starts to return 5xx errors, CloudFront can't get\n\t\t\tthe files that you want to return to viewers because the origin server is\n\t\t\tunavailable.
" + "smithy.api#documentation": "The path to the custom error page that you want CloudFront to return to a viewer when your\n\t\t\torigin returns the HTTP status code specified by ErrorCode
, for example,\n\t\t\t\t/4xx-errors/403-forbidden.html
. If you want to store your objects and\n\t\t\tyour custom error pages in different locations, your distribution must include a cache\n\t\t\tbehavior for which the following is true:
The value of PathPattern
matches the path to your custom error\n\t\t\t\t\tmessages. For example, suppose you saved custom error pages for 4xx errors in an\n\t\t\t\t\tAmazon S3 bucket in a directory named /4xx-errors
. Your distribution\n\t\t\t\t\tmust include a cache behavior for which the path pattern routes requests for\n\t\t\t\t\tyour custom error pages to that location, for example,\n\t\t\t\t\t\t/4xx-errors/*
.
The value of TargetOriginId
specifies the value of the\n\t\t\t\t\t\tID
element for the origin that contains your custom error\n\t\t\t\t\tpages.
If you specify a value for ResponsePagePath
, you must also specify a\n\t\t\tvalue for ResponseCode
.
We recommend that you store custom error pages in an Amazon S3 bucket. If you store custom\n\t\t\terror pages on an HTTP server and the server starts to return 5xx errors, CloudFront can't get\n\t\t\tthe files that you want to return to viewers because the origin server is\n\t\t\tunavailable.
" } }, "ResponseCode": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The HTTP status code that you want CloudFront to return to the viewer along with the custom\n\t\t\terror page. There are a variety of reasons that you might want CloudFront to return a status\n\t\t\tcode different from the status code that your origin returned to CloudFront, for\n\t\t\texample:
\nSome Internet devices (some firewalls and corporate proxies, for example)\n\t\t\t\t\tintercept HTTP 4xx and 5xx and prevent the response from being returned to the\n\t\t\t\t\tviewer. If you substitute 200
, the response typically won't be\n\t\t\t\t\tintercepted.
If you don't care about distinguishing among different client errors or server\n\t\t\t\t\terrors, you can specify 400
or 500
as the\n\t\t\t\t\t\tResponseCode
for all 4xx or 5xx errors.
You might want to return a 200
status code (OK) and static\n\t\t\t\t\twebsite so your customers don't know that your website is down.
If you specify a value for ResponseCode
, you must also specify a value\n\t\t\tfor ResponsePagePath
.
The HTTP status code that you want CloudFront to return to the viewer along with the custom\n\t\t\terror page. There are a variety of reasons that you might want CloudFront to return a status\n\t\t\tcode different from the status code that your origin returned to CloudFront, for\n\t\t\texample:
\n\t\tSome Internet devices (some firewalls and corporate proxies, for example)\n\t\t\t\t\tintercept HTTP 4xx and 5xx and prevent the response from being returned to the\n\t\t\t\t\tviewer. If you substitute 200
, the response typically won't be\n\t\t\t\t\tintercepted.
If you don't care about distinguishing among different client errors or server\n\t\t\t\t\terrors, you can specify 400
or 500
as the\n\t\t\t\t\t\tResponseCode
for all 4xx or 5xx errors.
You might want to return a 200
status code (OK) and static\n\t\t\t\t\twebsite so your customers don't know that your website is down.
If you specify a value for ResponseCode
, you must also specify a value\n\t\t\tfor ResponsePagePath
.
The minimum amount of time, in seconds, that you want CloudFront to cache the HTTP status\n\t\t\tcode specified in ErrorCode
. When this time period has elapsed, CloudFront\n\t\t\tqueries your origin to see whether the problem that caused the error has been resolved\n\t\t\tand the requested object is now available.
For more information, see Customizing\n\t\t\t\tError Responses in the Amazon CloudFront Developer Guide.
" + "smithy.api#documentation": "The minimum amount of time, in seconds, that you want CloudFront to cache the HTTP status\n\t\t\tcode specified in ErrorCode
. When this time period has elapsed, CloudFront\n\t\t\tqueries your origin to see whether the problem that caused the error has been resolved\n\t\t\tand the requested object is now available.
For more information, see Customizing\n\t\t\t\tError Responses in the Amazon CloudFront Developer Guide.
" } } }, "traits": { - "smithy.api#documentation": "A complex type that controls:
\nWhether CloudFront replaces HTTP status codes in the 4xx and 5xx range with custom\n\t\t\t\t\terror messages before returning the response to the viewer.
\nHow long CloudFront caches HTTP status codes in the 4xx and 5xx range.
\nFor more information about custom error pages, see Customizing\n\t\t\t\tError Responses in the Amazon CloudFront Developer Guide.
" + "smithy.api#documentation": "A complex type that controls:
\n\t\tWhether CloudFront replaces HTTP status codes in the 4xx and 5xx range with custom\n\t\t\t\t\terror messages before returning the response to the viewer.
\n\t\t\tHow long CloudFront caches HTTP status codes in the 4xx and 5xx range.
\n\t\t\tFor more information about custom error pages, see Customizing\n\t\t\t\tError Responses in the Amazon CloudFront Developer Guide.
" } }, "com.amazonaws.cloudfront#CustomErrorResponseList": { @@ -4824,7 +4824,7 @@ } }, "traits": { - "smithy.api#documentation": "A complex type that controls:
\nWhether CloudFront replaces HTTP status codes in the 4xx and 5xx range with custom\n\t\t\t\t\terror messages before returning the response to the viewer.
\nHow long CloudFront caches HTTP status codes in the 4xx and 5xx range.
\nFor more information about custom error pages, see Customizing\n\t\t\t\tError Responses in the Amazon CloudFront Developer Guide.
" + "smithy.api#documentation": "A complex type that controls:
\n\t\tWhether CloudFront replaces HTTP status codes in the 4xx and 5xx range with custom\n\t\t\t\t\terror messages before returning the response to the viewer.
\n\t\t\tHow long CloudFront caches HTTP status codes in the 4xx and 5xx range.
\n\t\t\tFor more information about custom error pages, see Customizing\n\t\t\t\tError Responses in the Amazon CloudFront Developer Guide.
" } }, "com.amazonaws.cloudfront#CustomHeaders": { @@ -4845,7 +4845,7 @@ } }, "traits": { - "smithy.api#documentation": "A complex type that contains the list of Custom Headers for each origin.
" + "smithy.api#documentation": "A complex type that contains the list of Custom Headers for each origin.
" } }, "com.amazonaws.cloudfront#CustomOriginConfig": { @@ -4868,31 +4868,31 @@ "OriginProtocolPolicy": { "target": "com.amazonaws.cloudfront#OriginProtocolPolicy", "traits": { - "smithy.api#documentation": "Specifies the protocol (HTTP or HTTPS) that CloudFront uses to connect to the origin. Valid\n\t\t\tvalues are:
\n\n http-only
– CloudFront always uses HTTP to connect to the\n\t\t\t\t\torigin.
\n match-viewer
– CloudFront connects to the origin using the same\n\t\t\t\t\tprotocol that the viewer used to connect to CloudFront.
\n https-only
– CloudFront always uses HTTPS to connect to the\n\t\t\t\t\torigin.
Specifies the protocol (HTTP or HTTPS) that CloudFront uses to connect to the origin. Valid\n\t\t\tvalues are:
\n\t\t\n http-only
– CloudFront always uses HTTP to connect to the\n\t\t\t\t\torigin.
\n match-viewer
– CloudFront connects to the origin using the same\n\t\t\t\t\tprotocol that the viewer used to connect to CloudFront.
\n https-only
– CloudFront always uses HTTPS to connect to the\n\t\t\t\t\torigin.
Specifies the minimum SSL/TLS protocol that CloudFront uses when connecting to your origin\n\t\t\tover HTTPS. Valid values include SSLv3
, TLSv1
,\n\t\t\t\tTLSv1.1
, and TLSv1.2
.
For more information, see Minimum Origin SSL Protocol in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" + "smithy.api#documentation": "Specifies the minimum SSL/TLS protocol that CloudFront uses when connecting to your origin\n\t\t\tover HTTPS. Valid values include SSLv3
, TLSv1
,\n\t\t\t\tTLSv1.1
, and TLSv1.2
.
For more information, see Minimum Origin SSL Protocol in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" } }, "OriginReadTimeout": { "target": "com.amazonaws.cloudfront#integer", "traits": { - "smithy.api#documentation": "Specifies how long, in seconds, CloudFront waits for a response from the origin. This is\n\t\t\talso known as the origin response timeout. The minimum timeout is 1\n\t\t\tsecond, the maximum is 60 seconds, and the default (if you don't specify otherwise) is\n\t\t\t30 seconds.
\nFor more information, see Origin Response Timeout in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" + "smithy.api#documentation": "Specifies how long, in seconds, CloudFront waits for a response from the origin. This is\n\t\t\talso known as the origin response timeout. The minimum timeout is 1\n\t\t\tsecond, the maximum is 60 seconds, and the default (if you don’t specify otherwise) is\n\t\t\t30 seconds.
\n\t\tFor more information, see Origin Response Timeout in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" } }, "OriginKeepaliveTimeout": { "target": "com.amazonaws.cloudfront#integer", "traits": { - "smithy.api#documentation": "Specifies how long, in seconds, CloudFront persists its connection to the origin. The\n\t\t\tminimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don't\n\t\t\tspecify otherwise) is 5 seconds.
\nFor more information, see Origin Keep-alive Timeout in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" + "smithy.api#documentation": "Specifies how long, in seconds, CloudFront persists its connection to the origin. The\n\t\t\tminimum timeout is 1 second, the maximum is 60 seconds, and the default (if you don’t\n\t\t\tspecify otherwise) is 5 seconds.
\n\t\tFor more information, see Origin Keep-alive Timeout in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" } } }, "traits": { - "smithy.api#documentation": "A custom origin. A custom origin is any origin that is not an\n\t\t\tAmazon S3 bucket, with one exception. An Amazon S3 bucket that is configured with\n\t\t\t\tstatic website hosting\n is a custom origin.
" + "smithy.api#documentation": "A custom origin. A custom origin is any origin that is not an\n\t\t\tAmazon S3 bucket, with one exception. An Amazon S3 bucket that is configured with\n\t\t\t\tstatic website hosting\n\t\t\t is a custom origin.
" } }, "com.amazonaws.cloudfront#DefaultCacheBehavior": { @@ -4908,19 +4908,19 @@ "TrustedSigners": { "target": "com.amazonaws.cloudfront#TrustedSigners", "traits": { - "smithy.api#documentation": "We recommend using TrustedKeyGroups
instead of\n\t\t\t\t\tTrustedSigners
.
A list of Amazon Web Services account IDs whose public keys CloudFront can use to validate signed URLs or\n\t\t\tsigned cookies.
\nWhen a cache behavior contains trusted signers, CloudFront requires signed URLs or signed\n\t\t\tcookies for all requests that match the cache behavior. The URLs or cookies must be\n\t\t\tsigned with the private key of a CloudFront key pair in a trusted signer's Amazon Web Services account. The\n\t\t\tsigned URL or cookie contains information about which public key CloudFront should use to\n\t\t\tverify the signature. For more information, see Serving private content in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" + "smithy.api#documentation": "We recommend using TrustedKeyGroups
instead of\n\t\t\t\t\tTrustedSigners
.
A list of Amazon Web Services account IDs whose public keys CloudFront can use to validate signed URLs or\n\t\t\tsigned cookies.
\n\t\tWhen a cache behavior contains trusted signers, CloudFront requires signed URLs or signed\n\t\t\tcookies for all requests that match the cache behavior. The URLs or cookies must be\n\t\t\tsigned with the private key of a CloudFront key pair in a trusted signer’s Amazon Web Services account. The\n\t\t\tsigned URL or cookie contains information about which public key CloudFront should use to\n\t\t\tverify the signature. For more information, see Serving private content in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" } }, "TrustedKeyGroups": { "target": "com.amazonaws.cloudfront#TrustedKeyGroups", "traits": { - "smithy.api#documentation": "A list of key groups that CloudFront can use to validate signed URLs or signed\n\t\t\tcookies.
\nWhen a cache behavior contains trusted key groups, CloudFront requires signed URLs or signed\n\t\t\tcookies for all requests that match the cache behavior. The URLs or cookies must be\n\t\t\tsigned with a private key whose corresponding public key is in the key group. The signed\n\t\t\tURL or cookie contains information about which public key CloudFront should use to verify the\n\t\t\tsignature. For more information, see Serving private content in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" + "smithy.api#documentation": "A list of key groups that CloudFront can use to validate signed URLs or signed\n\t\t\tcookies.
\n\t\tWhen a cache behavior contains trusted key groups, CloudFront requires signed URLs or signed\n\t\t\tcookies for all requests that match the cache behavior. The URLs or cookies must be\n\t\t\tsigned with a private key whose corresponding public key is in the key group. The signed\n\t\t\tURL or cookie contains information about which public key CloudFront should use to verify the\n\t\t\tsignature. For more information, see Serving private content in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" } }, "ViewerProtocolPolicy": { "target": "com.amazonaws.cloudfront#ViewerProtocolPolicy", "traits": { - "smithy.api#documentation": "The protocol that viewers can use to access the files in the origin specified by\n\t\t\t\tTargetOriginId
when a request matches the path pattern in\n\t\t\t\tPathPattern
. You can specify the following options:
\n allow-all
: Viewers can use HTTP or HTTPS.
\n redirect-to-https
: If a viewer submits an HTTP request, CloudFront\n\t\t\t\t\treturns an HTTP status code of 301 (Moved Permanently) to the viewer along with\n\t\t\t\t\tthe HTTPS URL. The viewer then resubmits the request using the new URL.
\n https-only
: If a viewer sends an HTTP request, CloudFront returns an\n\t\t\t\t\tHTTP status code of 403 (Forbidden).
For more information about requiring the HTTPS protocol, see Requiring HTTPS Between Viewers and CloudFront in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\nThe only way to guarantee that viewers retrieve an object that was fetched from\n\t\t\t\tthe origin using HTTPS is never to use any other protocol to fetch the object. If\n\t\t\t\tyou have recently changed from HTTP to HTTPS, we recommend that you clear your\n\t\t\t\tobjects' cache because cached objects are protocol agnostic. That means that an edge\n\t\t\t\tlocation will return an object from the cache regardless of whether the current\n\t\t\t\trequest protocol matches the protocol used previously. For more information, see\n\t\t\t\t\tManaging Cache\n\t\t\t\t\tExpiration in the Amazon CloudFront Developer Guide.
\nThe protocol that viewers can use to access the files in the origin specified by\n\t\t\t\tTargetOriginId
when a request matches the path pattern in\n\t\t\t\tPathPattern
. You can specify the following options:
\n allow-all
: Viewers can use HTTP or HTTPS.
\n redirect-to-https
: If a viewer submits an HTTP request, CloudFront\n\t\t\t\t\treturns an HTTP status code of 301 (Moved Permanently) to the viewer along with\n\t\t\t\t\tthe HTTPS URL. The viewer then resubmits the request using the new URL.
\n https-only
: If a viewer sends an HTTP request, CloudFront returns an\n\t\t\t\t\tHTTP status code of 403 (Forbidden).
For more information about requiring the HTTPS protocol, see Requiring HTTPS Between Viewers and CloudFront in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\n\t\tThe only way to guarantee that viewers retrieve an object that was fetched from\n\t\t\t\tthe origin using HTTPS is never to use any other protocol to fetch the object. If\n\t\t\t\tyou have recently changed from HTTP to HTTPS, we recommend that you clear your\n\t\t\t\tobjects’ cache because cached objects are protocol agnostic. That means that an edge\n\t\t\t\tlocation will return an object from the cache regardless of whether the current\n\t\t\t\trequest protocol matches the protocol used previously. For more information, see\n\t\t\t\t\tManaging Cache\n\t\t\t\t\tExpiration in the Amazon CloudFront Developer Guide.
\n\t\tIndicates whether you want to distribute media files in the Microsoft Smooth Streaming\n\t\t\tformat using the origin that is associated with this cache behavior. If so, specify\n\t\t\t\ttrue
; if not, specify false
. If you specify\n\t\t\t\ttrue
for SmoothStreaming
, you can still distribute other\n\t\t\tcontent using this cache behavior if the content matches the value of\n\t\t\t\tPathPattern
.
Indicates whether you want to distribute media files in the Microsoft Smooth Streaming\n\t\t\tformat using the origin that is associated with this cache behavior. If so, specify\n\t\t\t\ttrue
; if not, specify false
. If you specify\n\t\t\t\ttrue
for SmoothStreaming
, you can still distribute other\n\t\t\tcontent using this cache behavior if the content matches the value of\n\t\t\t\tPathPattern
.
The unique identifier of the cache policy that is attached to the default cache\n\t\t\tbehavior. For more information, see Creating cache policies or Using the managed cache policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\nA DefaultCacheBehavior
must include either a CachePolicyId
\n\t\t\tor ForwardedValues
. We recommend that you use a\n\t\t\tCachePolicyId
.
The unique identifier of the cache policy that is attached to the default cache\n\t\t\tbehavior. For more information, see Creating cache policies or Using the managed cache policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\n\t\tA DefaultCacheBehavior
must include either a CachePolicyId
\n\t\t\tor ForwardedValues
. We recommend that you use a\n\t\t\tCachePolicyId
.
This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field. For more information, see Working with policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\nIf you want to include values in the cache key, use a cache policy. For more\n\t\t\tinformation, see Creating cache policies or Using the managed cache policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\nIf you want to send values to the origin but not include them in the cache key, use an\n\t\t\torigin request policy. For more information, see Creating origin request policies or Using the managed origin request policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\nA DefaultCacheBehavior
must include either a CachePolicyId
\n\t\t\tor ForwardedValues
. We recommend that you use a\n\t\t\tCachePolicyId
.
A complex type that specifies how CloudFront handles query strings, cookies, and HTTP\n\t\t\theaders.
" + "smithy.api#documentation": "This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field. For more information, see Working with policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\n\t\tIf you want to include values in the cache key, use a cache policy. For more\n\t\t\tinformation, see Creating cache policies or Using the managed cache policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\n\t\tIf you want to send values to the origin but not include them in the cache key, use an\n\t\t\torigin request policy. For more information, see Creating origin request policies or Using the managed origin request policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\n\t\tA DefaultCacheBehavior
must include either a CachePolicyId
\n\t\t\tor ForwardedValues
. We recommend that you use a\n\t\t\tCachePolicyId
.
A complex type that specifies how CloudFront handles query strings, cookies, and HTTP\n\t\t\theaders.
" } }, "MinTTL": { "target": "com.amazonaws.cloudfront#long", "traits": { "smithy.api#deprecated": {}, - "smithy.api#documentation": "This field is deprecated. We recommend that you use the MinTTL
field in a\n\t\t\tcache policy instead of this field. For more information, see Creating cache policies or Using the managed cache policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront\n\t\t\tforwards another request to your origin to determine whether the object has been\n\t\t\tupdated. For more information, see Managing How Long\n\t\t\t\tContent Stays in an Edge Cache (Expiration) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\nYou must specify 0
for MinTTL
if you configure CloudFront to\n\t\t\tforward all headers to your origin (under Headers
, if you specify\n\t\t\t\t1
for Quantity
and *
for\n\t\t\tName
).
This field is deprecated. We recommend that you use the MinTTL
field in a\n\t\t\tcache policy instead of this field. For more information, see Creating cache policies or Using the managed cache policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
The minimum amount of time that you want objects to stay in CloudFront caches before CloudFront\n\t\t\tforwards another request to your origin to determine whether the object has been\n\t\t\tupdated. For more information, see Managing How Long\n\t\t\t\tContent Stays in an Edge Cache (Expiration) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\n\t\tYou must specify 0
for MinTTL
if you configure CloudFront to\n\t\t\tforward all headers to your origin (under Headers
, if you specify\n\t\t\t\t1
for Quantity
and *
for\n\t\t\tName
).
This field is deprecated. We recommend that you use the DefaultTTL
field\n\t\t\tin a cache policy instead of this field. For more information, see Creating cache policies or Using the managed cache policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
The default amount of time that you want objects to stay in CloudFront caches before CloudFront\n\t\t\tforwards another request to your origin to determine whether the object has been\n\t\t\tupdated. The value that you specify applies only when your origin does not add HTTP\n\t\t\theaders such as Cache-Control max-age
, Cache-Control s-maxage
,\n\t\t\tand Expires
to objects. For more information, see Managing How Long Content Stays in an Edge Cache (Expiration) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
This field is deprecated. We recommend that you use the DefaultTTL
field\n\t\t\tin a cache policy instead of this field. For more information, see Creating cache policies or Using the managed cache policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
The default amount of time that you want objects to stay in CloudFront caches before CloudFront\n\t\t\tforwards another request to your origin to determine whether the object has been\n\t\t\tupdated. The value that you specify applies only when your origin does not add HTTP\n\t\t\theaders such as Cache-Control max-age
, Cache-Control s-maxage
,\n\t\t\tand Expires
to objects. For more information, see Managing How Long Content Stays in an Edge Cache (Expiration) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
This field is deprecated. We recommend that you use the MaxTTL
field in a\n\t\t\tcache policy instead of this field. For more information, see Creating cache policies or Using the managed cache policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
The maximum amount of time that you want objects to stay in CloudFront caches before CloudFront\n\t\t\tforwards another request to your origin to determine whether the object has been\n\t\t\tupdated. The value that you specify applies only when your origin adds HTTP headers such\n\t\t\tas Cache-Control max-age
, Cache-Control s-maxage
, and\n\t\t\t\tExpires
to objects. For more information, see Managing How Long Content Stays in an Edge Cache (Expiration) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
This field is deprecated. We recommend that you use the MaxTTL
field in a\n\t\t\tcache policy instead of this field. For more information, see Creating cache policies or Using the managed cache policies in the\n\t\t\t\tAmazon CloudFront Developer Guide.
The maximum amount of time that you want objects to stay in CloudFront caches before CloudFront\n\t\t\tforwards another request to your origin to determine whether the object has been\n\t\t\tupdated. The value that you specify applies only when your origin adds HTTP headers such\n\t\t\tas Cache-Control max-age
, Cache-Control s-maxage
, and\n\t\t\t\tExpires
to objects. For more information, see Managing How Long Content Stays in an Edge Cache (Expiration) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
A complex type that describes the default cache behavior if you don't specify a\n\t\t\t\tCacheBehavior
element or if request URLs don't match any of the values\n\t\t\tof PathPattern
in CacheBehavior
elements. You must create\n\t\t\texactly one default cache behavior.
A complex type that describes the default cache behavior if you don’t specify a\n\t\t\t\tCacheBehavior
element or if request URLs don’t match any of the values\n\t\t\tof PathPattern
in CacheBehavior
elements. You must create\n\t\t\texactly one default cache behavior.
Deletes a cache policy.
\nYou cannot delete a cache policy if it's attached to a cache behavior. First update\n\t\t\tyour distributions to remove the cache policy from all cache behaviors, then delete the\n\t\t\tcache policy.
\nTo delete a cache policy, you must provide the policy's identifier and version. To get\n\t\t\tthese values, you can use ListCachePolicies
or\n\t\t\tGetCachePolicy
.
Deletes a cache policy.
\n\t\tYou cannot delete a cache policy if it’s attached to a cache behavior. First update\n\t\t\tyour distributions to remove the cache policy from all cache behaviors, then delete the\n\t\t\tcache policy.
\n\t\tTo delete a cache policy, you must provide the policy’s identifier and version. To get\n\t\t\tthese values, you can use ListCachePolicies
or\n\t\t\tGetCachePolicy
.
The version of the cache policy that you are deleting. The version is the cache\n\t\t\tpolicy's ETag
value, which you can get using\n\t\t\tListCachePolicies
, GetCachePolicy
, or\n\t\t\t\tGetCachePolicyConfig
.
The version of the cache policy that you are deleting. The version is the cache\n\t\t\tpolicy’s ETag
value, which you can get using\n\t\t\tListCachePolicies
, GetCachePolicy
, or\n\t\t\t\tGetCachePolicyConfig
.
Delete an origin access identity.
", + "smithy.api#documentation": "Delete an origin access identity.
", "smithy.api#http": { "method": "DELETE", "uri": "/2020-05-31/origin-access-identity/cloudfront/{Id}", @@ -5157,7 +5157,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a continuous deployment policy.
\nYou cannot delete a continuous deployment policy that's attached to a primary\n\t\t\tdistribution. First update your distribution to remove the continuous deployment policy,\n\t\t\tthen you can delete the policy.
", + "smithy.api#documentation": "Deletes a continuous deployment policy.
\n\t\tYou cannot delete a continuous deployment policy that’s attached to a primary\n\t\t\tdistribution. First update your distribution to remove the continuous deployment policy,\n\t\t\tthen you can delete the policy.
", "smithy.api#http": { "method": "DELETE", "uri": "/2020-05-31/continuous-deployment-policy/{Id}", @@ -5211,7 +5211,7 @@ } ], "traits": { - "smithy.api#documentation": "Delete a distribution.
", + "smithy.api#documentation": "Delete a distribution.
", "smithy.api#http": { "method": "DELETE", "uri": "/2020-05-31/distribution/{Id}", @@ -5225,7 +5225,7 @@ "Id": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The distribution ID.
", + "smithy.api#documentation": "The distribution ID.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -5233,13 +5233,13 @@ "IfMatch": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The value of the ETag
header that you received when you disabled the\n\t\t\tdistribution. For example: E2QWRUHAPOMQZL
.
The value of the ETag
header that you received when you disabled the\n\t\t\tdistribution. For example: E2QWRUHAPOMQZL
.
This action deletes a web distribution. To delete a web distribution using the CloudFront\n\t\t\tAPI, perform the following steps.
\n\n To delete a web distribution using the CloudFront API:\n
\nDisable the web distribution
\nSubmit a GET Distribution Config
request to get the current\n\t\t\t\t\tconfiguration and the Etag
header for the distribution.
Update the XML document that was returned in the response to your GET\n\t\t\t\t\t\tDistribution Config
request to change the value of\n\t\t\t\t\t\tEnabled
to false
.
Submit a PUT Distribution Config
request to update the\n\t\t\t\t\tconfiguration for your distribution. In the request body, include the XML\n\t\t\t\t\tdocument that you updated in Step 3. Set the value of the HTTP\n\t\t\t\t\t\tIf-Match
header to the value of the ETag
header\n\t\t\t\t\tthat CloudFront returned when you submitted the GET Distribution Config
\n\t\t\t\t\trequest in Step 2.
Review the response to the PUT Distribution Config
request to\n\t\t\t\t\tconfirm that the distribution was successfully disabled.
Submit a GET Distribution
request to confirm that your changes\n\t\t\t\t\thave propagated. When propagation is complete, the value of Status
\n\t\t\t\t\tis Deployed
.
Submit a DELETE Distribution
request. Set the value of the HTTP\n\t\t\t\t\t\tIf-Match
header to the value of the ETag
header\n\t\t\t\t\tthat CloudFront returned when you submitted the GET Distribution Config
\n\t\t\t\t\trequest in Step 6.
Review the response to your DELETE Distribution
request to\n\t\t\t\t\tconfirm that the distribution was successfully deleted.
For information about deleting a distribution using the CloudFront console, see Deleting a\n\t\t\t\tDistribution in the Amazon CloudFront Developer Guide.
" + "smithy.api#documentation": "This action deletes a web distribution. To delete a web distribution using the CloudFront\n\t\t\tAPI, perform the following steps.
\n\t\t\n\t\t\t To delete a web distribution using the CloudFront API:\n\t\t
\n\t\tDisable the web distribution
\n\t\t\tSubmit a GET Distribution Config
request to get the current\n\t\t\t\t\tconfiguration and the Etag
header for the distribution.
Update the XML document that was returned in the response to your GET\n\t\t\t\t\t\tDistribution Config
request to change the value of\n\t\t\t\t\t\tEnabled
to false
.
Submit a PUT Distribution Config
request to update the\n\t\t\t\t\tconfiguration for your distribution. In the request body, include the XML\n\t\t\t\t\tdocument that you updated in Step 3. Set the value of the HTTP\n\t\t\t\t\t\tIf-Match
header to the value of the ETag
header\n\t\t\t\t\tthat CloudFront returned when you submitted the GET Distribution Config
\n\t\t\t\t\trequest in Step 2.
Review the response to the PUT Distribution Config
request to\n\t\t\t\t\tconfirm that the distribution was successfully disabled.
Submit a GET Distribution
request to confirm that your changes\n\t\t\t\t\thave propagated. When propagation is complete, the value of Status
\n\t\t\t\t\tis Deployed
.
Submit a DELETE Distribution
request. Set the value of the HTTP\n\t\t\t\t\t\tIf-Match
header to the value of the ETag
header\n\t\t\t\t\tthat CloudFront returned when you submitted the GET Distribution Config
\n\t\t\t\t\trequest in Step 6.
Review the response to your DELETE Distribution
request to\n\t\t\t\t\tconfirm that the distribution was successfully deleted.
For information about deleting a distribution using the CloudFront console, see Deleting a\n\t\t\t\tDistribution in the Amazon CloudFront Developer Guide.
" } }, "com.amazonaws.cloudfront#DeleteFieldLevelEncryptionConfig": { @@ -5376,7 +5376,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a CloudFront function.
\nYou cannot delete a function if it's associated with a cache behavior. First, update\n\t\t\tyour distributions to remove the function association from all cache behaviors, then\n\t\t\tdelete the function.
\nTo delete a function, you must provide the function's name and version\n\t\t\t\t(ETag
value). To get these values, you can use\n\t\t\t\tListFunctions
and DescribeFunction
.
Deletes a CloudFront function.
\n\t\tYou cannot delete a function if it’s associated with a cache behavior. First, update\n\t\t\tyour distributions to remove the function association from all cache behaviors, then\n\t\t\tdelete the function.
\n\t\tTo delete a function, you must provide the function’s name and version\n\t\t\t\t(ETag
value). To get these values, you can use\n\t\t\t\tListFunctions
and DescribeFunction
.
Deletes a key group.
\nYou cannot delete a key group that is referenced in a cache behavior. First update\n\t\t\tyour distributions to remove the key group from all cache behaviors, then delete the key\n\t\t\tgroup.
\nTo delete a key group, you must provide the key group's identifier and version. To get\n\t\t\tthese values, use ListKeyGroups
followed by GetKeyGroup
or\n\t\t\t\tGetKeyGroupConfig
.
Deletes a key group.
\n\t\tYou cannot delete a key group that is referenced in a cache behavior. First update\n\t\t\tyour distributions to remove the key group from all cache behaviors, then delete the key\n\t\t\tgroup.
\n\t\tTo delete a key group, you must provide the key group’s identifier and version. To get\n\t\t\tthese values, use ListKeyGroups
followed by GetKeyGroup
or\n\t\t\t\tGetKeyGroupConfig
.
The version of the key group that you are deleting. The version is the key group's\n\t\t\t\tETag
value. To get the ETag
, use GetKeyGroup
\n\t\t\tor GetKeyGroupConfig
.
The version of the key group that you are deleting. The version is the key group’s\n\t\t\t\tETag
value. To get the ETag
, use GetKeyGroup
\n\t\t\tor GetKeyGroupConfig
.
Deletes a CloudFront origin access control.
\nYou cannot delete an origin access control if it's in use. First, update all\n\t\t\tdistributions to remove the origin access control from all origins, then delete the\n\t\t\torigin access control.
", + "smithy.api#documentation": "Deletes a CloudFront origin access control.
\n\t\tYou cannot delete an origin access control if it's in use. First, update all\n\t\t\tdistributions to remove the origin access control from all origins, then delete the\n\t\t\torigin access control.
", "smithy.api#http": { "method": "DELETE", "uri": "/2020-05-31/origin-access-control/{Id}", @@ -5587,7 +5587,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes an origin request policy.
\nYou cannot delete an origin request policy if it's attached to any cache behaviors.\n\t\t\tFirst update your distributions to remove the origin request policy from all cache\n\t\t\tbehaviors, then delete the origin request policy.
\nTo delete an origin request policy, you must provide the policy's identifier and\n\t\t\tversion. To get the identifier, you can use ListOriginRequestPolicies
or\n\t\t\t\tGetOriginRequestPolicy
.
Deletes an origin request policy.
\n\t\tYou cannot delete an origin request policy if it’s attached to any cache behaviors.\n\t\t\tFirst update your distributions to remove the origin request policy from all cache\n\t\t\tbehaviors, then delete the origin request policy.
\n\t\tTo delete an origin request policy, you must provide the policy’s identifier and\n\t\t\tversion. To get the identifier, you can use ListOriginRequestPolicies
or\n\t\t\t\tGetOriginRequestPolicy
.
The version of the origin request policy that you are deleting. The version is the\n\t\t\torigin request policy's ETag
value, which you can get using\n\t\t\t\tListOriginRequestPolicies
, GetOriginRequestPolicy
, or\n\t\t\t\tGetOriginRequestPolicyConfig
.
The version of the origin request policy that you are deleting. The version is the\n\t\t\torigin request policy’s ETag
value, which you can get using\n\t\t\t\tListOriginRequestPolicies
, GetOriginRequestPolicy
, or\n\t\t\t\tGetOriginRequestPolicyConfig
.
Deletes a real-time log configuration.
\nYou cannot delete a real-time log configuration if it's attached to a cache behavior.\n\t\t\tFirst update your distributions to remove the real-time log configuration from all cache\n\t\t\tbehaviors, then delete the real-time log configuration.
\nTo delete a real-time log configuration, you can provide the configuration's name or\n\t\t\tits Amazon Resource Name (ARN). You must provide at least one. If you provide both, CloudFront\n\t\t\tuses the name to identify the real-time log configuration to delete.
", + "smithy.api#documentation": "Deletes a real-time log configuration.
\n\t\tYou cannot delete a real-time log configuration if it’s attached to a cache behavior.\n\t\t\tFirst update your distributions to remove the real-time log configuration from all cache\n\t\t\tbehaviors, then delete the real-time log configuration.
\n\t\tTo delete a real-time log configuration, you can provide the configuration’s name or\n\t\t\tits Amazon Resource Name (ARN). You must provide at least one. If you provide both, CloudFront\n\t\t\tuses the name to identify the real-time log configuration to delete.
", "smithy.api#http": { "method": "POST", "uri": "/2020-05-31/delete-realtime-log-config", @@ -5746,7 +5746,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a response headers policy.
\nYou cannot delete a response headers policy if it's attached to a cache behavior.\n\t\t\tFirst update your distributions to remove the response headers policy from all cache\n\t\t\tbehaviors, then delete the response headers policy.
\nTo delete a response headers policy, you must provide the policy's identifier and\n\t\t\tversion. To get these values, you can use ListResponseHeadersPolicies
or\n\t\t\t\tGetResponseHeadersPolicy
.
Deletes a response headers policy.
\n\t\tYou cannot delete a response headers policy if it’s attached to a cache behavior.\n\t\t\tFirst update your distributions to remove the response headers policy from all cache\n\t\t\tbehaviors, then delete the response headers policy.
\n\t\tTo delete a response headers policy, you must provide the policy’s identifier and\n\t\t\tversion. To get these values, you can use ListResponseHeadersPolicies
or\n\t\t\t\tGetResponseHeadersPolicy
.
The identifier for the response headers policy that you are deleting.
\nTo get the identifier, you can use ListResponseHeadersPolicies
.
The identifier for the response headers policy that you are deleting.
\n\t\tTo get the identifier, you can use ListResponseHeadersPolicies
.
The version of the response headers policy that you are deleting.
\nThe version is the response headers policy's ETag
value, which you can\n\t\t\tget using ListResponseHeadersPolicies
,\n\t\t\t\tGetResponseHeadersPolicy
, or\n\t\t\t\tGetResponseHeadersPolicyConfig
.
The version of the response headers policy that you are deleting.
\n\t\tThe version is the response headers policy’s ETag
value, which you can\n\t\t\tget using ListResponseHeadersPolicies
,\n\t\t\t\tGetResponseHeadersPolicy
, or\n\t\t\t\tGetResponseHeadersPolicyConfig
.
Delete a streaming distribution. To delete an RTMP distribution using the CloudFront API,\n\t\t\tperform the following steps.
\n\n To delete an RTMP distribution using the CloudFront\n\t\t\tAPI:
\nDisable the RTMP distribution.
\nSubmit a GET Streaming Distribution Config
request to get the\n\t\t\t\t\tcurrent configuration and the Etag
header for the distribution.\n\t\t\t\t
Update the XML document that was returned in the response to your GET\n\t\t\t\t\t\tStreaming Distribution Config
request to change the value of\n\t\t\t\t\t\tEnabled
to false
.
Submit a PUT Streaming Distribution Config
request to update the\n\t\t\t\t\tconfiguration for your distribution. In the request body, include the XML\n\t\t\t\t\tdocument that you updated in Step 3. Then set the value of the HTTP\n\t\t\t\t\t\tIf-Match
header to the value of the ETag
header\n\t\t\t\t\tthat CloudFront returned when you submitted the GET Streaming Distribution\n\t\t\t\t\t\tConfig
request in Step 2.
Review the response to the PUT Streaming Distribution Config
\n\t\t\t\t\trequest to confirm that the distribution was successfully disabled.
Submit a GET Streaming Distribution Config
request to confirm\n\t\t\t\t\tthat your changes have propagated. When propagation is complete, the value of\n\t\t\t\t\t\tStatus
is Deployed
.
Submit a DELETE Streaming Distribution
request. Set the value of\n\t\t\t\t\tthe HTTP If-Match
header to the value of the ETag
\n\t\t\t\t\theader that CloudFront returned when you submitted the GET Streaming\n\t\t\t\t\t\tDistribution Config
request in Step 2.
Review the response to your DELETE Streaming Distribution
request\n\t\t\t\t\tto confirm that the distribution was successfully deleted.
For information about deleting a distribution using the CloudFront console, see Deleting a\n\t\t\t\tDistribution in the Amazon CloudFront Developer Guide.
", + "smithy.api#documentation": "Delete a streaming distribution. To delete an RTMP distribution using the CloudFront API,\n\t\t\tperform the following steps.
\n\n\t\t\n To delete an RTMP distribution using the CloudFront\n\t\t\tAPI:
\n\t\tDisable the RTMP distribution.
\n\t\t\tSubmit a GET Streaming Distribution Config
request to get the\n\t\t\t\t\tcurrent configuration and the Etag
header for the distribution.\n\t\t\t\t
Update the XML document that was returned in the response to your GET\n\t\t\t\t\t\tStreaming Distribution Config
request to change the value of\n\t\t\t\t\t\tEnabled
to false
.
Submit a PUT Streaming Distribution Config
request to update the\n\t\t\t\t\tconfiguration for your distribution. In the request body, include the XML\n\t\t\t\t\tdocument that you updated in Step 3. Then set the value of the HTTP\n\t\t\t\t\t\tIf-Match
header to the value of the ETag
header\n\t\t\t\t\tthat CloudFront returned when you submitted the GET Streaming Distribution\n\t\t\t\t\t\tConfig
request in Step 2.
Review the response to the PUT Streaming Distribution Config
\n\t\t\t\t\trequest to confirm that the distribution was successfully disabled.
Submit a GET Streaming Distribution Config
request to confirm\n\t\t\t\t\tthat your changes have propagated. When propagation is complete, the value of\n\t\t\t\t\t\tStatus
is Deployed
.
Submit a DELETE Streaming Distribution
request. Set the value of\n\t\t\t\t\tthe HTTP If-Match
header to the value of the ETag
\n\t\t\t\t\theader that CloudFront returned when you submitted the GET Streaming\n\t\t\t\t\t\tDistribution Config
request in Step 2.
Review the response to your DELETE Streaming Distribution
request\n\t\t\t\t\tto confirm that the distribution was successfully deleted.
For information about deleting a distribution using the CloudFront console, see Deleting a\n\t\t\t\tDistribution in the Amazon CloudFront Developer Guide.
", "smithy.api#http": { "method": "DELETE", "uri": "/2020-05-31/streaming-distribution/{Id}", @@ -5814,7 +5814,7 @@ "Id": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The distribution ID.
", + "smithy.api#documentation": "The distribution ID.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -5848,7 +5848,7 @@ } ], "traits": { - "smithy.api#documentation": "Gets configuration information and metadata about a CloudFront function, but not the\n\t\t\tfunction's code. To get a function's code, use GetFunction
.
To get configuration information and metadata about a function, you must provide the\n\t\t\tfunction's name and stage. To get these values, you can use\n\t\t\tListFunctions
.
Gets configuration information and metadata about a CloudFront function, but not the\n\t\t\tfunction’s code. To get a function’s code, use GetFunction
.
To get configuration information and metadata about a function, you must provide the\n\t\t\tfunction’s name and stage. To get these values, you can use\n\t\t\tListFunctions
.
The function's stage, either DEVELOPMENT
or LIVE
.
The function’s stage, either DEVELOPMENT
or LIVE
.
The distribution's identifier. For example: E1U5RQF7T870K0
.
The distribution’s identifier. For example: E1U5RQF7T870K0
.
The distribution's Amazon Resource Name (ARN).
", + "smithy.api#documentation": "The distribution’s Amazon Resource Name (ARN).
", "smithy.api#required": {} } }, "Status": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The distribution's status. When the status is Deployed
, the\n\t\t\tdistribution's information is fully propagated to all CloudFront edge locations.
The distribution’s status. When the status is Deployed
, the\n\t\t\tdistribution’s information is fully propagated to all CloudFront edge locations.
The number of invalidation batches currently in progress.
", + "smithy.api#documentation": "The number of invalidation batches currently in progress.
", "smithy.api#required": {} } }, "DomainName": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The distribution's CloudFront domain name. For example:\n\t\t\td111111abcdef8.cloudfront.net
.
The distribution’s CloudFront domain name. For example:\n\t\t\td111111abcdef8.cloudfront.net
.
We recommend using TrustedKeyGroups
instead of\n\t\t\t\t\tTrustedSigners
.
This field contains a list of Amazon Web Services account IDs and the active CloudFront key pairs in each\n\t\t\taccount that CloudFront can use to verify the signatures of signed URLs or signed\n\t\t\tcookies.
" + "smithy.api#documentation": "We recommend using TrustedKeyGroups
instead of\n\t\t\t\t\tTrustedSigners
.
CloudFront automatically adds this field to the response if you’ve configured a cache\n\t\t\tbehavior in this distribution to serve private content using trusted signers. This field\n\t\t\tcontains a list of Amazon Web Services account IDs and the active CloudFront key pairs in each account that\n\t\t\tCloudFront can use to verify the signatures of signed URLs or signed cookies.
" } }, "ActiveTrustedKeyGroups": { "target": "com.amazonaws.cloudfront#ActiveTrustedKeyGroups", "traits": { - "smithy.api#documentation": "This field contains a list of key groups and the public keys in each key group that\n\t\t\tCloudFront can use to verify the signatures of signed URLs or signed cookies.
" + "smithy.api#documentation": "CloudFront automatically adds this field to the response if you’ve configured a cache\n\t\t\tbehavior in this distribution to serve private content using key groups. This field\n\t\t\tcontains a list of key groups and the public keys in each key group that CloudFront can use to\n\t\t\tverify the signatures of signed URLs or signed cookies.
" } }, "DistributionConfig": { "target": "com.amazonaws.cloudfront#DistributionConfig", "traits": { - "smithy.api#documentation": "The distribution's configuration.
", + "smithy.api#documentation": "The distribution’s configuration.
", "smithy.api#required": {} } }, "AliasICPRecordals": { "target": "com.amazonaws.cloudfront#AliasICPRecordals", "traits": { - "smithy.api#documentation": "Amazon Web Services services in China customers must file for an Internet Content Provider (ICP)\n\t\t\trecordal if they want to serve content publicly on an alternate domain name, also known\n\t\t\tas a CNAME, that they've added to CloudFront. AliasICPRecordal provides the ICP recordal\n\t\t\tstatus for CNAMEs associated with distributions.
\nFor more information about ICP recordals, see Signup, Accounts, and Credentials in Getting Started with Amazon Web Services\n\t\t\t\tservices in China.
" + "smithy.api#documentation": "Amazon Web Services services in China customers must file for an Internet Content Provider (ICP)\n\t\t\trecordal if they want to serve content publicly on an alternate domain name, also known\n\t\t\tas a CNAME, that they've added to CloudFront. AliasICPRecordal provides the ICP recordal\n\t\t\tstatus for CNAMEs associated with distributions.
\n\t\tFor more information about ICP recordals, see Signup, Accounts, and Credentials in Getting Started with Amazon Web Services\n\t\t\t\tservices in China.
" } } }, @@ -5989,7 +5989,7 @@ "CallerReference": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "A unique value (for example, a date-time stamp) that ensures that the request can't be\n\t\t\treplayed.
\nIf the value of CallerReference
is new (regardless of the content of the\n\t\t\t\tDistributionConfig
object), CloudFront creates a new distribution.
If CallerReference
is a value that you already sent in a previous request\n\t\t\tto create a distribution, CloudFront returns a DistributionAlreadyExists
\n\t\t\terror.
A unique value (for example, a date-time stamp) that ensures that the request can't be\n\t\t\treplayed.
\n\t\tIf the value of CallerReference
is new (regardless of the content of the\n\t\t\t\tDistributionConfig
object), CloudFront creates a new distribution.
If CallerReference
is a value that you already sent in a previous request\n\t\t\tto create a distribution, CloudFront returns a DistributionAlreadyExists
\n\t\t\terror.
The object that you want CloudFront to request from your origin (for example,\n\t\t\t\tindex.html
) when a viewer requests the root URL for your distribution\n\t\t\t\t(https://www.example.com
) instead of an object in your distribution\n\t\t\t\t(https://www.example.com/product-description.html
). Specifying a\n\t\t\tdefault root object avoids exposing the contents of your distribution.
Specify only the object name, for example, index.html
. Don't add a\n\t\t\t\t/
before the object name.
If you don't want to specify a default root object when you create a distribution,\n\t\t\tinclude an empty DefaultRootObject
element.
To delete the default root object from an existing distribution, update the\n\t\t\tdistribution configuration and include an empty DefaultRootObject
\n\t\t\telement.
To replace the default root object, update the distribution configuration and specify\n\t\t\tthe new object.
\nFor more information about the default root object, see Creating a\n\t\t\t\tDefault Root Object in the Amazon CloudFront Developer Guide.
" + "smithy.api#documentation": "The object that you want CloudFront to request from your origin (for example,\n\t\t\t\tindex.html
) when a viewer requests the root URL for your distribution\n\t\t\t\t(https://www.example.com
) instead of an object in your distribution\n\t\t\t\t(https://www.example.com/product-description.html
). Specifying a\n\t\t\tdefault root object avoids exposing the contents of your distribution.
Specify only the object name, for example, index.html
. Don't add a\n\t\t\t\t/
before the object name.
If you don't want to specify a default root object when you create a distribution,\n\t\t\tinclude an empty DefaultRootObject
element.
To delete the default root object from an existing distribution, update the\n\t\t\tdistribution configuration and include an empty DefaultRootObject
\n\t\t\telement.
To replace the default root object, update the distribution configuration and specify\n\t\t\tthe new object.
\n\t\tFor more information about the default root object, see Creating a\n\t\t\t\tDefault Root Object in the Amazon CloudFront Developer Guide.
" } }, "Origins": { "target": "com.amazonaws.cloudfront#Origins", "traits": { - "smithy.api#documentation": "A complex type that contains information about origins for this distribution.
", + "smithy.api#documentation": "A complex type that contains information about origins for this distribution.
", "smithy.api#required": {} } }, "OriginGroups": { "target": "com.amazonaws.cloudfront#OriginGroups", "traits": { - "smithy.api#documentation": "A complex type that contains information about origin groups for this\n\t\t\tdistribution.
" + "smithy.api#documentation": "A complex type that contains information about origin groups for this\n\t\t\tdistribution.
" } }, "DefaultCacheBehavior": { @@ -6028,32 +6028,32 @@ "CacheBehaviors": { "target": "com.amazonaws.cloudfront#CacheBehaviors", "traits": { - "smithy.api#documentation": "A complex type that contains zero or more CacheBehavior
elements.
A complex type that contains zero or more CacheBehavior
elements.
A complex type that controls the following:
\nWhether CloudFront replaces HTTP status codes in the 4xx and 5xx range with custom\n\t\t\t\t\terror messages before returning the response to the viewer.
\nHow long CloudFront caches HTTP status codes in the 4xx and 5xx range.
\nFor more information about custom error pages, see Customizing\n\t\t\t\tError Responses in the Amazon CloudFront Developer Guide.
" + "smithy.api#documentation": "A complex type that controls the following:
\n\t\tWhether CloudFront replaces HTTP status codes in the 4xx and 5xx range with custom\n\t\t\t\t\terror messages before returning the response to the viewer.
\n\t\t\tHow long CloudFront caches HTTP status codes in the 4xx and 5xx range.
\n\t\t\tFor more information about custom error pages, see Customizing\n\t\t\t\tError Responses in the Amazon CloudFront Developer Guide.
" } }, "Comment": { "target": "com.amazonaws.cloudfront#CommentType", "traits": { - "smithy.api#documentation": "A comment to describe the distribution. The comment cannot be longer than\n\t\t\t128 characters.
", + "smithy.api#documentation": "An optional comment to describe the distribution. The comment cannot be longer than\n\t\t\t128 characters.
", "smithy.api#required": {} } }, "Logging": { "target": "com.amazonaws.cloudfront#LoggingConfig", "traits": { - "smithy.api#documentation": "A complex type that controls whether access logs are written for the\n\t\t\tdistribution.
\nFor more information about logging, see Access Logs in\n\t\t\tthe Amazon CloudFront Developer Guide.
" + "smithy.api#documentation": "A complex type that controls whether access logs are written for the\n\t\t\tdistribution.
\n\t\tFor more information about logging, see Access Logs in\n\t\t\tthe Amazon CloudFront Developer Guide.
" } }, "PriceClass": { "target": "com.amazonaws.cloudfront#PriceClass", "traits": { - "smithy.api#documentation": "The price class that corresponds with the maximum price that you want to pay for CloudFront\n\t\t\tservice. If you specify PriceClass_All
, CloudFront responds to requests for your\n\t\t\tobjects from all CloudFront edge locations.
If you specify a price class other than PriceClass_All
, CloudFront serves your\n\t\t\tobjects from the CloudFront edge location that has the lowest latency among the edge locations\n\t\t\tin your price class. Viewers who are in or near regions that are excluded from your\n\t\t\tspecified price class may encounter slower performance.
For more information about price classes, see Choosing the Price\n\t\t\t\tClass for a CloudFront Distribution in the Amazon CloudFront Developer Guide.\n\t\t\tFor information about CloudFront pricing, including how price classes (such as Price Class\n\t\t\t100) map to CloudFront regions, see Amazon CloudFront\n\t\t\t\tPricing.
" + "smithy.api#documentation": "The price class that corresponds with the maximum price that you want to pay for CloudFront\n\t\t\tservice. If you specify PriceClass_All
, CloudFront responds to requests for your\n\t\t\tobjects from all CloudFront edge locations.
If you specify a price class other than PriceClass_All
, CloudFront serves your\n\t\t\tobjects from the CloudFront edge location that has the lowest latency among the edge locations\n\t\t\tin your price class. Viewers who are in or near regions that are excluded from your\n\t\t\tspecified price class may encounter slower performance.
For more information about price classes, see Choosing the Price\n\t\t\t\tClass for a CloudFront Distribution in the Amazon CloudFront Developer Guide.\n\t\t\tFor information about CloudFront pricing, including how price classes (such as Price Class\n\t\t\t100) map to CloudFront regions, see Amazon CloudFront\n\t\t\t\tPricing.
" } }, "Enabled": { @@ -6066,7 +6066,7 @@ "ViewerCertificate": { "target": "com.amazonaws.cloudfront#ViewerCertificate", "traits": { - "smithy.api#documentation": "A complex type that determines the distribution's SSL/TLS configuration for\n\t\t\tcommunicating with viewers.
" + "smithy.api#documentation": "A complex type that determines the distribution’s SSL/TLS configuration for\n\t\t\tcommunicating with viewers.
" } }, "Restrictions": { @@ -6078,19 +6078,19 @@ "WebACLId": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "A unique identifier that specifies the WAF web ACL, if any, to associate with this\n\t\t\tdistribution. To specify a web ACL created using the latest version of WAF, use the\n\t\t\tACL ARN, for example\n\t\t\t\tarn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/473e64fd-f30b-4765-81a0-62ad96dd167a
.\n\t\t\tTo specify a web ACL created using WAF Classic, use the ACL ID, for example\n\t\t\t\t473e64fd-f30b-4765-81a0-62ad96dd167a
.
WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests\n\t\t\tthat are forwarded to CloudFront, and lets you control access to your content. Based on\n\t\t\tconditions that you specify, such as the IP addresses that requests originate from or\n\t\t\tthe values of query strings, CloudFront responds to requests either with the requested content\n\t\t\tor with an HTTP 403 status code (Forbidden). You can also configure CloudFront to return a\n\t\t\tcustom error page when a request is blocked. For more information about WAF, see the\n\t\t\t\tWAF Developer Guide.
" + "smithy.api#documentation": "A unique identifier that specifies the WAF web ACL, if any, to associate with this\n\t\t\tdistribution. To specify a web ACL created using the latest version of WAF, use the\n\t\t\tACL ARN, for example\n\t\t\t\tarn:aws:wafv2:us-east-1:123456789012:global/webacl/ExampleWebACL/473e64fd-f30b-4765-81a0-62ad96dd167a
.\n\t\t\tTo specify a web ACL created using WAF Classic, use the ACL ID, for example\n\t\t\t\t473e64fd-f30b-4765-81a0-62ad96dd167a
.
WAF is a web application firewall that lets you monitor the HTTP and HTTPS requests\n\t\t\tthat are forwarded to CloudFront, and lets you control access to your content. Based on\n\t\t\tconditions that you specify, such as the IP addresses that requests originate from or\n\t\t\tthe values of query strings, CloudFront responds to requests either with the requested content\n\t\t\tor with an HTTP 403 status code (Forbidden). You can also configure CloudFront to return a\n\t\t\tcustom error page when a request is blocked. For more information about WAF, see the\n\t\t\t\tWAF Developer Guide.
" } }, "HttpVersion": { "target": "com.amazonaws.cloudfront#HttpVersion", "traits": { - "smithy.api#documentation": "(Optional) Specify the maximum HTTP version(s) that you want viewers to use to\n\t\t\tcommunicate with CloudFront. The default value for new web distributions is\n\t\t\thttp2
. Viewers that don't support HTTP/2 automatically use an earlier HTTP\n\t\t\tversion.
For viewers and CloudFront to use HTTP/2, viewers must support TLSv1.2 or later, and must\n\t\t\tsupport Server Name Indication (SNI).
\nFor viewers and CloudFront to use HTTP/3, viewers must support TLSv1.3 and Server Name\n\t\t\tIndication (SNI). CloudFront supports HTTP/3 connection migration to allow the viewer to\n\t\t\tswitch networks without losing connection. For more information about connection\n\t\t\tmigration, see Connection Migration at RFC 9000. For more information about supported\n\t\t\tTLSv1.3 ciphers, see Supported protocols and ciphers between viewers and CloudFront.
" + "smithy.api#documentation": "(Optional) Specify the maximum HTTP version(s) that you want viewers to use to\n\t\t\tcommunicate with CloudFront. The default value for new web distributions is\n\t\t\thttp2
. Viewers that don't support HTTP/2 automatically use an earlier HTTP\n\t\t\tversion.
For viewers and CloudFront to use HTTP/2, viewers must support TLSv1.2 or later, and must\n\t\t\tsupport Server Name Indication (SNI).
\n\t\tFor viewers and CloudFront to use HTTP/3, viewers must support TLSv1.3 and Server Name\n\t\t\tIndication (SNI). CloudFront supports HTTP/3 connection migration to allow the viewer to\n\t\t\tswitch networks without losing connection. For more information about connection\n\t\t\tmigration, see Connection Migration at RFC 9000. For more information about supported\n\t\t\tTLSv1.3 ciphers, see Supported protocols and ciphers between viewers and CloudFront.
" } }, "IsIPV6Enabled": { "target": "com.amazonaws.cloudfront#boolean", "traits": { - "smithy.api#documentation": "If you want CloudFront to respond to IPv6 DNS requests with an IPv6 address for your\n\t\t\tdistribution, specify true
. If you specify false
, CloudFront\n\t\t\tresponds to IPv6 DNS requests with the DNS response code NOERROR
and with\n\t\t\tno IP addresses. This allows viewers to submit a second request, for an IPv4 address for\n\t\t\tyour distribution.
In general, you should enable IPv6 if you have users on IPv6 networks who want to\n\t\t\taccess your content. However, if you're using signed URLs or signed cookies to restrict\n\t\t\taccess to your content, and if you're using a custom policy that includes the\n\t\t\t\tIpAddress
parameter to restrict the IP addresses that can access your\n\t\t\tcontent, don't enable IPv6. If you want to restrict access to some content by IP address\n\t\t\tand not restrict access to other content (or restrict access but not by IP address), you\n\t\t\tcan create two distributions. For more information, see Creating a Signed URL Using a Custom Policy in the\n\t\t\t\tAmazon CloudFront Developer Guide.
If you're using an Route 53 Amazon Web Services Integration alias resource record set to route traffic to your CloudFront\n\t\t\tdistribution, you need to create a second alias resource record set when both of the\n\t\t\tfollowing are true:
\nYou enable IPv6 for the distribution
\nYou're using alternate domain names in the URLs for your objects
\nFor more information, see Routing\n\t\t\t\tTraffic to an Amazon CloudFront Web Distribution by Using Your Domain Name in the\n\t\t\t\tRoute 53 Amazon Web Services Integration Developer Guide.
\nIf you created a CNAME resource record set, either with Route 53 Amazon Web Services Integration or with another DNS\n\t\t\tservice, you don't need to make any changes. A CNAME record will route traffic to your\n\t\t\tdistribution regardless of the IP address format of the viewer request.
" + "smithy.api#documentation": "If you want CloudFront to respond to IPv6 DNS requests with an IPv6 address for your\n\t\t\tdistribution, specify true
. If you specify false
, CloudFront\n\t\t\tresponds to IPv6 DNS requests with the DNS response code NOERROR
and with\n\t\t\tno IP addresses. This allows viewers to submit a second request, for an IPv4 address for\n\t\t\tyour distribution.
In general, you should enable IPv6 if you have users on IPv6 networks who want to\n\t\t\taccess your content. However, if you're using signed URLs or signed cookies to restrict\n\t\t\taccess to your content, and if you're using a custom policy that includes the\n\t\t\t\tIpAddress
parameter to restrict the IP addresses that can access your\n\t\t\tcontent, don't enable IPv6. If you want to restrict access to some content by IP address\n\t\t\tand not restrict access to other content (or restrict access but not by IP address), you\n\t\t\tcan create two distributions. For more information, see Creating a Signed URL Using a Custom Policy in the\n\t\t\t\tAmazon CloudFront Developer Guide.
If you're using an Route 53 Amazon Web Services Integration alias resource record set to route traffic to your CloudFront\n\t\t\tdistribution, you need to create a second alias resource record set when both of the\n\t\t\tfollowing are true:
\n\t\tYou enable IPv6 for the distribution
\n\t\t\tYou're using alternate domain names in the URLs for your objects
\n\t\t\tFor more information, see Routing\n\t\t\t\tTraffic to an Amazon CloudFront Web Distribution by Using Your Domain Name in the\n\t\t\t\tRoute 53 Amazon Web Services Integration Developer Guide.
\n\t\tIf you created a CNAME resource record set, either with Route 53 Amazon Web Services Integration or with another DNS\n\t\t\tservice, you don't need to make any changes. A CNAME record will route traffic to your\n\t\t\tdistribution regardless of the IP address format of the viewer request.
" } }, "ContinuousDeploymentPolicyId": { @@ -6202,7 +6202,7 @@ "NextMarker": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "If IsTruncated
is true
, this element is present and contains\n\t\t\tthe value you can use for the Marker
request parameter to continue listing\n\t\t\tyour distributions where they left off.
If IsTruncated
is true
, this element is present and contains\n\t\t\tthe value you can use for the Marker
request parameter to continue listing\n\t\t\tyour distributions where they left off.
A complex type that contains information about origin groups for this\n\t\t\tdistribution.
" + "smithy.api#documentation": "A complex type that contains information about origin groups for this\n\t\t\tdistribution.
" } }, "DefaultCacheBehavior": { @@ -6339,7 +6339,7 @@ "PriceClass": { "target": "com.amazonaws.cloudfront#PriceClass", "traits": { - "smithy.api#documentation": "A complex type that contains information about price class for this streaming\n\t\t\tdistribution.
", + "smithy.api#documentation": "A complex type that contains information about price class for this streaming\n\t\t\tdistribution.
", "smithy.api#required": {} } }, @@ -6353,7 +6353,7 @@ "ViewerCertificate": { "target": "com.amazonaws.cloudfront#ViewerCertificate", "traits": { - "smithy.api#documentation": "A complex type that determines the distribution's SSL/TLS configuration for\n\t\t\tcommunicating with viewers.
", + "smithy.api#documentation": "A complex type that determines the distribution’s SSL/TLS configuration for\n\t\t\tcommunicating with viewers.
", "smithy.api#required": {} } }, @@ -6374,7 +6374,7 @@ "HttpVersion": { "target": "com.amazonaws.cloudfront#HttpVersion", "traits": { - "smithy.api#documentation": "Specify the maximum HTTP version that you want viewers to use to communicate with\n\t\t\tCloudFront. The default value for new web distributions is http2
. Viewers that\n\t\t\tdon't support HTTP/2
will automatically use an earlier version.
Specify the maximum HTTP version that you want viewers to use to communicate with\n\t\t\tCloudFront. The default value for new web distributions is http2
. Viewers that\n\t\t\tdon't support HTTP/2
will automatically use an earlier version.
Amazon Web Services services in China customers must file for an Internet Content Provider (ICP)\n\t\t\trecordal if they want to serve content publicly on an alternate domain name, also known\n\t\t\tas a CNAME, that they've added to CloudFront. AliasICPRecordal provides the ICP recordal\n\t\t\tstatus for CNAMEs associated with distributions.
\nFor more information about ICP recordals, see Signup, Accounts, and Credentials in Getting Started with Amazon Web Services\n\t\t\t\tservices in China.
" + "smithy.api#documentation": "Amazon Web Services services in China customers must file for an Internet Content Provider (ICP)\n\t\t\trecordal if they want to serve content publicly on an alternate domain name, also known\n\t\t\tas a CNAME, that they've added to CloudFront. AliasICPRecordal provides the ICP recordal\n\t\t\tstatus for CNAMEs associated with distributions.
\n\t\tFor more information about ICP recordals, see Signup, Accounts, and Credentials in Getting Started with Amazon Web Services\n\t\t\t\tservices in China.
" } }, "Staging": { @@ -6418,7 +6418,7 @@ "Quantity": { "target": "com.amazonaws.cloudfront#integer", "traits": { - "smithy.api#documentation": "Number of field pattern items in a field-level encryption content type-profile\n\t\t\tmapping.
", + "smithy.api#documentation": "Number of field pattern items in a field-level encryption content type-profile\n\t\t\tmapping.
", "smithy.api#required": {} } }, @@ -6430,7 +6430,7 @@ } }, "traits": { - "smithy.api#documentation": "Complex data type for field-level encryption profiles that includes all of the\n\t\t\tencryption entities.
" + "smithy.api#documentation": "Complex data type for field-level encryption profiles that includes all of the\n\t\t\tencryption entities.
" } }, "com.amazonaws.cloudfront#EncryptionEntity": { @@ -6439,7 +6439,7 @@ "PublicKeyId": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The public key associated with a set of field-level encryption patterns, to be used\n\t\t\twhen encrypting the fields that match the patterns.
", + "smithy.api#documentation": "The public key associated with a set of field-level encryption patterns, to be used\n\t\t\twhen encrypting the fields that match the patterns.
", "smithy.api#required": {} } }, @@ -6453,13 +6453,13 @@ "FieldPatterns": { "target": "com.amazonaws.cloudfront#FieldPatterns", "traits": { - "smithy.api#documentation": "Field patterns in a field-level encryption content type profile specify the fields\n\t\t\tthat you want to be encrypted. You can provide the full field name, or any beginning\n\t\t\tcharacters followed by a wildcard (*). You can't overlap field patterns. For example,\n\t\t\tyou can't have both ABC* and AB*. Note that field patterns are case-sensitive.
", + "smithy.api#documentation": "Field patterns in a field-level encryption content type profile specify the fields\n\t\t\tthat you want to be encrypted. You can provide the full field name, or any beginning\n\t\t\tcharacters followed by a wildcard (*). You can't overlap field patterns. For example,\n\t\t\tyou can't have both ABC* and AB*. Note that field patterns are case-sensitive.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "Complex data type for field-level encryption profiles that includes the encryption key\n\t\t\tand field pattern specifications.
" + "smithy.api#documentation": "Complex data type for field-level encryption profiles that includes the encryption key\n\t\t\tand field pattern specifications.
" } }, "com.amazonaws.cloudfront#EncryptionEntityList": { @@ -6540,20 +6540,20 @@ "LastModifiedTime": { "target": "com.amazonaws.cloudfront#timestamp", "traits": { - "smithy.api#documentation": "The last time the field-level encryption configuration was changed.
", + "smithy.api#documentation": "The last time the field-level encryption configuration was changed.
", "smithy.api#required": {} } }, "FieldLevelEncryptionConfig": { "target": "com.amazonaws.cloudfront#FieldLevelEncryptionConfig", "traits": { - "smithy.api#documentation": "A complex data type that includes the profile configurations specified for field-level\n\t\t\tencryption.
", + "smithy.api#documentation": "A complex data type that includes the profile configurations specified for field-level\n\t\t\tencryption.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "A complex data type that includes the profile configurations and other options\n\t\t\tspecified for field-level encryption.
" + "smithy.api#documentation": "A complex data type that includes the profile configurations and other options\n\t\t\tspecified for field-level encryption.
" } }, "com.amazonaws.cloudfront#FieldLevelEncryptionConfig": { @@ -6586,7 +6586,7 @@ } }, "traits": { - "smithy.api#documentation": "A complex data type that includes the profile configurations specified for field-level\n\t\t\tencryption.
" + "smithy.api#documentation": "A complex data type that includes the profile configurations specified for field-level\n\t\t\tencryption.
" } }, "com.amazonaws.cloudfront#FieldLevelEncryptionConfigAlreadyExists": { @@ -6627,7 +6627,7 @@ "MaxItems": { "target": "com.amazonaws.cloudfront#integer", "traits": { - "smithy.api#documentation": "The maximum number of elements you want in the response body.
", + "smithy.api#documentation": "The maximum number of elements you want in the response body.
", "smithy.api#required": {} } }, @@ -6863,13 +6863,13 @@ "QueryArgProfileConfig": { "target": "com.amazonaws.cloudfront#QueryArgProfileConfig", "traits": { - "smithy.api#documentation": "A summary of a query argument-profile mapping.
" + "smithy.api#documentation": "A summary of a query argument-profile mapping.
" } }, "ContentTypeProfileConfig": { "target": "com.amazonaws.cloudfront#ContentTypeProfileConfig", "traits": { - "smithy.api#documentation": "A summary of a content type-profile mapping.
" + "smithy.api#documentation": "A summary of a content type-profile mapping.
" } } }, @@ -6942,32 +6942,32 @@ "QueryString": { "target": "com.amazonaws.cloudfront#boolean", "traits": { - "smithy.api#documentation": "This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field.
\nIf you want to include query strings in the cache key, use a cache policy. For more\n\t\t\tinformation, see Creating cache policies in the Amazon CloudFront Developer Guide.
\nIf you want to send query strings to the origin but not include them in the cache key,\n\t\t\tuse an origin request policy. For more information, see Creating origin request policies in the\n\t\t\tAmazon CloudFront Developer Guide.
\nIndicates whether you want CloudFront to forward query strings to the origin that is\n\t\t\tassociated with this cache behavior and cache based on the query string parameters. CloudFront\n\t\t\tbehavior depends on the value of QueryString
and on the values that you\n\t\t\tspecify for QueryStringCacheKeys
, if any:
If you specify true for QueryString
and you don't specify any values for\n\t\t\t\tQueryStringCacheKeys
, CloudFront forwards all query string parameters to the\n\t\t\torigin and caches based on all query string parameters. Depending on how many query\n\t\t\tstring parameters and values you have, this can adversely affect performance because\n\t\t\tCloudFront must forward more requests to the origin.
If you specify true for QueryString
and you specify one or more values\n\t\t\tfor QueryStringCacheKeys
, CloudFront forwards all query string parameters to the\n\t\t\torigin, but it only caches based on the query string parameters that you specify.
If you specify false for QueryString
, CloudFront doesn't forward any query\n\t\t\tstring parameters to the origin, and doesn't cache based on query string\n\t\t\tparameters.
For more information, see Configuring\n\t\t\t\tCloudFront to Cache Based on Query String Parameters in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", + "smithy.api#documentation": "This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field.
\n\t\tIf you want to include query strings in the cache key, use a cache policy. For more\n\t\t\tinformation, see Creating cache policies in the Amazon CloudFront Developer Guide.
\n\t\tIf you want to send query strings to the origin but not include them in the cache key,\n\t\t\tuse an origin request policy. For more information, see Creating origin request policies in the\n\t\t\tAmazon CloudFront Developer Guide.
\n\t\tIndicates whether you want CloudFront to forward query strings to the origin that is\n\t\t\tassociated with this cache behavior and cache based on the query string parameters. CloudFront\n\t\t\tbehavior depends on the value of QueryString
and on the values that you\n\t\t\tspecify for QueryStringCacheKeys
, if any:
If you specify true for QueryString
and you don't specify any values for\n\t\t\t\tQueryStringCacheKeys
, CloudFront forwards all query string parameters to the\n\t\t\torigin and caches based on all query string parameters. Depending on how many query\n\t\t\tstring parameters and values you have, this can adversely affect performance because\n\t\t\tCloudFront must forward more requests to the origin.
If you specify true for QueryString
and you specify one or more values\n\t\t\tfor QueryStringCacheKeys
, CloudFront forwards all query string parameters to the\n\t\t\torigin, but it only caches based on the query string parameters that you specify.
If you specify false for QueryString
, CloudFront doesn't forward any query\n\t\t\tstring parameters to the origin, and doesn't cache based on query string\n\t\t\tparameters.
For more information, see Configuring\n\t\t\t\tCloudFront to Cache Based on Query String Parameters in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", "smithy.api#required": {} } }, "Cookies": { "target": "com.amazonaws.cloudfront#CookiePreference", "traits": { - "smithy.api#documentation": "This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field.
\nIf you want to include cookies in the cache key, use a cache policy. For more\n\t\t\tinformation, see Creating cache policies in the Amazon CloudFront Developer Guide.
\nIf you want to send cookies to the origin but not include them in the cache key, use\n\t\t\tan origin request policy. For more information, see Creating origin request policies in the\n\t\t\tAmazon CloudFront Developer Guide.
\nA complex type that specifies whether you want CloudFront to forward cookies to the origin\n\t\t\tand, if so, which ones. For more information about forwarding cookies to the origin, see\n\t\t\t\tHow CloudFront Forwards, Caches,\n\t\t\t\tand Logs Cookies in the Amazon CloudFront Developer Guide.
", + "smithy.api#documentation": "This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field.
\n\t\tIf you want to include cookies in the cache key, use a cache policy. For more\n\t\t\tinformation, see Creating cache policies in the Amazon CloudFront Developer Guide.
\n\t\tIf you want to send cookies to the origin but not include them in the cache key, use\n\t\t\tan origin request policy. For more information, see Creating origin request policies in the\n\t\t\tAmazon CloudFront Developer Guide.
\n\t\tA complex type that specifies whether you want CloudFront to forward cookies to the origin\n\t\t\tand, if so, which ones. For more information about forwarding cookies to the origin, see\n\t\t\t\tHow CloudFront Forwards, Caches,\n\t\t\t\tand Logs Cookies in the Amazon CloudFront Developer Guide.
", "smithy.api#required": {} } }, "Headers": { "target": "com.amazonaws.cloudfront#Headers", "traits": { - "smithy.api#documentation": "This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field.
\nIf you want to include headers in the cache key, use a cache policy. For more\n\t\t\tinformation, see Creating cache policies in the Amazon CloudFront Developer Guide.
\nIf you want to send headers to the origin but not include them in the cache key, use\n\t\t\tan origin request policy. For more information, see Creating origin request policies in the\n\t\t\tAmazon CloudFront Developer Guide.
\nA complex type that specifies the Headers
, if any, that you want CloudFront to\n\t\t\tforward to the origin for this cache behavior (whitelisted headers). For the headers\n\t\t\tthat you specify, CloudFront also caches separate versions of a specified object that is based\n\t\t\ton the header values in viewer requests.
For more information, see Caching Content\n\t\t\t\tBased on Request Headers in the Amazon CloudFront Developer Guide.
" + "smithy.api#documentation": "This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field.
\n\t\tIf you want to include headers in the cache key, use a cache policy. For more\n\t\t\tinformation, see Creating cache policies in the Amazon CloudFront Developer Guide.
\n\t\tIf you want to send headers to the origin but not include them in the cache key, use\n\t\t\tan origin request policy. For more information, see Creating origin request policies in the\n\t\t\tAmazon CloudFront Developer Guide.
\n\t\tA complex type that specifies the Headers
, if any, that you want CloudFront to\n\t\t\tforward to the origin for this cache behavior (whitelisted headers). For the headers\n\t\t\tthat you specify, CloudFront also caches separate versions of a specified object that is based\n\t\t\ton the header values in viewer requests.
For more information, see Caching Content\n\t\t\t\tBased on Request Headers in the Amazon CloudFront Developer Guide.
" } }, "QueryStringCacheKeys": { "target": "com.amazonaws.cloudfront#QueryStringCacheKeys", "traits": { - "smithy.api#documentation": "This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field.
\nIf you want to include query strings in the cache key, use a cache policy. For more\n\t\t\tinformation, see Creating cache policies in the Amazon CloudFront Developer Guide.
\nIf you want to send query strings to the origin but not include them in the cache key,\n\t\t\tuse an origin request policy. For more information, see Creating origin request policies in the\n\t\t\tAmazon CloudFront Developer Guide.
\nA complex type that contains information about the query string parameters that you\n\t\t\twant CloudFront to use for caching for this cache behavior.
" + "smithy.api#documentation": "This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field.
\n\t\tIf you want to include query strings in the cache key, use a cache policy. For more\n\t\t\tinformation, see Creating cache policies in the Amazon CloudFront Developer Guide.
\n\t\tIf you want to send query strings to the origin but not include them in the cache key,\n\t\t\tuse an origin request policy. For more information, see Creating origin request policies in the\n\t\t\tAmazon CloudFront Developer Guide.
\n\t\tA complex type that contains information about the query string parameters that you\n\t\t\twant CloudFront to use for caching for this cache behavior.
" } } }, "traits": { - "smithy.api#documentation": "This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field.
\nIf you want to include values in the cache key, use a cache policy. For more\n\t\t\tinformation, see Creating cache policies in the Amazon CloudFront Developer Guide.
\nIf you want to send values to the origin but not include them in the cache key, use an\n\t\t\torigin request policy. For more information, see Creating origin request policies in the\n\t\t\tAmazon CloudFront Developer Guide.
\nA complex type that specifies how CloudFront handles query strings, cookies, and HTTP\n\t\t\theaders.
" + "smithy.api#documentation": "This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field.
\n\t\tIf you want to include values in the cache key, use a cache policy. For more\n\t\t\tinformation, see Creating cache policies in the Amazon CloudFront Developer Guide.
\n\t\tIf you want to send values to the origin but not include them in the cache key, use an\n\t\t\torigin request policy. For more information, see Creating origin request policies in the\n\t\t\tAmazon CloudFront Developer Guide.
\n\t\tA complex type that specifies how CloudFront handles query strings, cookies, and HTTP\n\t\t\theaders.
" } }, "com.amazonaws.cloudfront#FrameOptionsList": { @@ -7085,7 +7085,7 @@ "Runtime": { "target": "com.amazonaws.cloudfront#FunctionRuntime", "traits": { - "smithy.api#documentation": "The function's runtime environment. The only valid value is\n\t\t\t\tcloudfront-js-1.0
.
The function’s runtime environment. The only valid value is\n\t\t\t\tcloudfront-js-1.0
.
Cannot delete the function because it's attached to one or more cache\n\t\t\tbehaviors.
", + "smithy.api#documentation": "Cannot delete the function because it’s attached to one or more cache\n\t\t\tbehaviors.
", "smithy.api#error": "client", "smithy.api#httpError": 409 } @@ -7173,7 +7173,7 @@ "Stage": { "target": "com.amazonaws.cloudfront#FunctionStage", "traits": { - "smithy.api#documentation": "The stage that the function is in, either DEVELOPMENT
or\n\t\t\t\tLIVE
.
When a function is in the DEVELOPMENT
stage, you can test the function\n\t\t\twith TestFunction
, and update it with UpdateFunction
.
When a function is in the LIVE
stage, you can attach the function to a\n\t\t\tdistribution's cache behavior, using the function's ARN.
The stage that the function is in, either DEVELOPMENT
or\n\t\t\t\tLIVE
.
When a function is in the DEVELOPMENT
stage, you can test the function\n\t\t\twith TestFunction
, and update it with UpdateFunction
.
When a function is in the LIVE
stage, you can attach the function to a\n\t\t\tdistribution’s cache behavior, using the function’s ARN.
The method that you want to use to restrict distribution of your content by\n\t\t\tcountry:
\n\n none
: No geo restriction is enabled, meaning access to content is\n\t\t\t\t\tnot restricted by client geo location.
\n blacklist
: The Location
elements specify the\n\t\t\t\t\tcountries in which you don't want CloudFront to distribute your content.
\n whitelist
: The Location
elements specify the\n\t\t\t\t\tcountries in which you want CloudFront to distribute your content.
The method that you want to use to restrict distribution of your content by\n\t\t\tcountry:
\n\t\t\n none
: No geo restriction is enabled, meaning access to content is\n\t\t\t\t\tnot restricted by client geo location.
\n blacklist
: The Location
elements specify the\n\t\t\t\t\tcountries in which you don't want CloudFront to distribute your content.
\n whitelist
: The Location
elements specify the\n\t\t\t\t\tcountries in which you want CloudFront to distribute your content.
A complex type that contains a Location
element for each country in\n\t\t\twhich you want CloudFront either to distribute your content (whitelist
) or not\n\t\t\tdistribute your content (blacklist
).
The Location
element is a two-letter, uppercase country code for a\n\t\t\tcountry that you want to include in your blacklist
or\n\t\t\t\twhitelist
. Include one Location
element for each\n\t\t\tcountry.
CloudFront and MaxMind
both use ISO 3166
country codes. For the\n\t\t\tcurrent list of countries and the corresponding codes, see ISO\n\t\t\t\t3166-1-alpha-2
code on the International Organization for\n\t\t\t\tStandardization website. You can also refer to the country list on the\n\t\t\tCloudFront console, which includes both country names and codes.
A complex type that contains a Location
element for each country in\n\t\t\twhich you want CloudFront either to distribute your content (whitelist
) or not\n\t\t\tdistribute your content (blacklist
).
The Location
element is a two-letter, uppercase country code for a\n\t\t\tcountry that you want to include in your blacklist
or\n\t\t\t\twhitelist
. Include one Location
element for each\n\t\t\tcountry.
CloudFront and MaxMind
both use ISO 3166
country codes. For the\n\t\t\tcurrent list of countries and the corresponding codes, see ISO\n\t\t\t\t3166-1-alpha-2
code on the International Organization for\n\t\t\t\tStandardization website. You can also refer to the country list on the\n\t\t\tCloudFront console, which includes both country names and codes.
Gets a cache policy, including the following metadata:
\nThe policy's identifier.
\nThe date and time when the policy was last modified.
\nTo get a cache policy, you must provide the policy's identifier. If the cache policy\n\t\t\tis attached to a distribution's cache behavior, you can get the policy's identifier\n\t\t\tusing ListDistributions
or GetDistribution
. If the cache\n\t\t\tpolicy is not attached to a cache behavior, you can get the identifier using\n\t\t\t\tListCachePolicies
.
Gets a cache policy, including the following metadata:
\n\t\tThe policy’s identifier.
\n\t\t\tThe date and time when the policy was last modified.
\n\t\t\tTo get a cache policy, you must provide the policy’s identifier. If the cache policy\n\t\t\tis attached to a distribution’s cache behavior, you can get the policy’s identifier\n\t\t\tusing ListDistributions
or GetDistribution
. If the cache\n\t\t\tpolicy is not attached to a cache behavior, you can get the identifier using\n\t\t\t\tListCachePolicies
.
Gets a cache policy configuration.
\nTo get a cache policy configuration, you must provide the policy's identifier. If the\n\t\t\tcache policy is attached to a distribution's cache behavior, you can get the policy's\n\t\t\tidentifier using ListDistributions
or GetDistribution
. If the\n\t\t\tcache policy is not attached to a cache behavior, you can get the identifier using\n\t\t\t\tListCachePolicies
.
Gets a cache policy configuration.
\n\t\tTo get a cache policy configuration, you must provide the policy’s identifier. If the\n\t\t\tcache policy is attached to a distribution’s cache behavior, you can get the policy’s\n\t\t\tidentifier using ListDistributions
or GetDistribution
. If the\n\t\t\tcache policy is not attached to a cache behavior, you can get the identifier using\n\t\t\t\tListCachePolicies
.
The unique identifier for the cache policy. If the cache policy is attached to a\n\t\t\tdistribution's cache behavior, you can get the policy's identifier using\n\t\t\t\tListDistributions
or GetDistribution
. If the cache policy\n\t\t\tis not attached to a cache behavior, you can get the identifier using\n\t\t\t\tListCachePolicies
.
The unique identifier for the cache policy. If the cache policy is attached to a\n\t\t\tdistribution’s cache behavior, you can get the policy’s identifier using\n\t\t\t\tListDistributions
or GetDistribution
. If the cache policy\n\t\t\tis not attached to a cache behavior, you can get the identifier using\n\t\t\t\tListCachePolicies
.
The unique identifier for the cache policy. If the cache policy is attached to a\n\t\t\tdistribution's cache behavior, you can get the policy's identifier using\n\t\t\t\tListDistributions
or GetDistribution
. If the cache policy\n\t\t\tis not attached to a cache behavior, you can get the identifier using\n\t\t\t\tListCachePolicies
.
The unique identifier for the cache policy. If the cache policy is attached to a\n\t\t\tdistribution’s cache behavior, you can get the policy’s identifier using\n\t\t\t\tListDistributions
or GetDistribution
. If the cache policy\n\t\t\tis not attached to a cache behavior, you can get the identifier using\n\t\t\t\tListCachePolicies
.
Get the information about an origin access identity.
", + "smithy.api#documentation": "Get the information about an origin access identity.
", "smithy.api#http": { "method": "GET", "uri": "/2020-05-31/origin-access-identity/cloudfront/{Id}", @@ -7496,7 +7496,7 @@ } ], "traits": { - "smithy.api#documentation": "Get the configuration information about an origin access identity.
", + "smithy.api#documentation": "Get the configuration information about an origin access identity.
", "smithy.api#http": { "method": "GET", "uri": "/2020-05-31/origin-access-identity/cloudfront/{Id}/config", @@ -7510,7 +7510,7 @@ "Id": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The identity's ID.
", + "smithy.api#documentation": "The identity's ID.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -7526,7 +7526,7 @@ "CloudFrontOriginAccessIdentityConfig": { "target": "com.amazonaws.cloudfront#CloudFrontOriginAccessIdentityConfig", "traits": { - "smithy.api#documentation": "The origin access identity's configuration information.
", + "smithy.api#documentation": "The origin access identity's configuration information.
", "smithy.api#httpPayload": {} } }, @@ -7597,7 +7597,7 @@ } ], "traits": { - "smithy.api#documentation": "Gets a continuous deployment policy, including metadata (the policy's identifier and\n\t\t\tthe date and time when the policy was last modified).
", + "smithy.api#documentation": "Gets a continuous deployment policy, including metadata (the policy’s identifier and\n\t\t\tthe date and time when the policy was last modified).
", "smithy.api#http": { "method": "GET", "uri": "/2020-05-31/continuous-deployment-policy/{Id}", @@ -7753,7 +7753,7 @@ } ], "traits": { - "smithy.api#documentation": "Get the configuration information about a distribution.
", + "smithy.api#documentation": "Get the configuration information about a distribution.
", "smithy.api#http": { "method": "GET", "uri": "/2020-05-31/distribution/{Id}/config", @@ -8082,7 +8082,7 @@ } ], "traits": { - "smithy.api#documentation": "Gets the code of a CloudFront function. To get configuration information and metadata about\n\t\t\ta function, use DescribeFunction
.
To get a function's code, you must provide the function's name and stage. To get these\n\t\t\tvalues, you can use ListFunctions
.
Gets the code of a CloudFront function. To get configuration information and metadata about\n\t\t\ta function, use DescribeFunction
.
To get a function’s code, you must provide the function’s name and stage. To get these\n\t\t\tvalues, you can use ListFunctions
.
The function's stage, either DEVELOPMENT
or LIVE
.
The function’s stage, either DEVELOPMENT
or LIVE
.
Get the information about an invalidation.
", + "smithy.api#documentation": "Get the information about an invalidation.
", "smithy.api#http": { "method": "GET", "uri": "/2020-05-31/distribution/{DistributionId}/invalidation/{Id}", @@ -8203,7 +8203,7 @@ } }, "traits": { - "smithy.api#documentation": "The request to get an invalidation's information.
" + "smithy.api#documentation": "The request to get an invalidation's information.
" } }, "com.amazonaws.cloudfront#GetInvalidationResult": { @@ -8212,7 +8212,7 @@ "Invalidation": { "target": "com.amazonaws.cloudfront#Invalidation", "traits": { - "smithy.api#documentation": "The invalidation's information. For more information, see Invalidation\n\t\t\t\tComplex Type.
", + "smithy.api#documentation": "The invalidation's information. For more information, see Invalidation\n\t\t\t\tComplex Type.
", "smithy.api#httpPayload": {} } } @@ -8235,7 +8235,7 @@ } ], "traits": { - "smithy.api#documentation": "Gets a key group, including the date and time when the key group was last\n\t\t\tmodified.
\nTo get a key group, you must provide the key group's identifier. If the key group is\n\t\t\treferenced in a distribution's cache behavior, you can get the key group's identifier\n\t\t\tusing ListDistributions
or GetDistribution
. If the key group\n\t\t\tis not referenced in a cache behavior, you can get the identifier using\n\t\t\t\tListKeyGroups
.
Gets a key group, including the date and time when the key group was last\n\t\t\tmodified.
\n\t\tTo get a key group, you must provide the key group’s identifier. If the key group is\n\t\t\treferenced in a distribution’s cache behavior, you can get the key group’s identifier\n\t\t\tusing ListDistributions
or GetDistribution
. If the key group\n\t\t\tis not referenced in a cache behavior, you can get the identifier using\n\t\t\t\tListKeyGroups
.
Gets a key group configuration.
\nTo get a key group configuration, you must provide the key group's identifier. If the\n\t\t\tkey group is referenced in a distribution's cache behavior, you can get the key group's\n\t\t\tidentifier using ListDistributions
or GetDistribution
. If the\n\t\t\tkey group is not referenced in a cache behavior, you can get the identifier using\n\t\t\t\tListKeyGroups
.
Gets a key group configuration.
\n\t\tTo get a key group configuration, you must provide the key group’s identifier. If the\n\t\t\tkey group is referenced in a distribution’s cache behavior, you can get the key group’s\n\t\t\tidentifier using ListDistributions
or GetDistribution
. If the\n\t\t\tkey group is not referenced in a cache behavior, you can get the identifier using\n\t\t\t\tListKeyGroups
.
Gets an origin request policy, including the following metadata:
\nThe policy's identifier.
\nThe date and time when the policy was last modified.
\nTo get an origin request policy, you must provide the policy's identifier. If the\n\t\t\torigin request policy is attached to a distribution's cache behavior, you can get the\n\t\t\tpolicy's identifier using ListDistributions
or\n\t\t\tGetDistribution
. If the origin request policy is not attached to a cache\n\t\t\tbehavior, you can get the identifier using\n\t\t\tListOriginRequestPolicies
.
Gets an origin request policy, including the following metadata:
\n\t\tThe policy’s identifier.
\n\t\t\tThe date and time when the policy was last modified.
\n\t\t\tTo get an origin request policy, you must provide the policy’s identifier. If the\n\t\t\torigin request policy is attached to a distribution’s cache behavior, you can get the\n\t\t\tpolicy’s identifier using ListDistributions
or\n\t\t\tGetDistribution
. If the origin request policy is not attached to a cache\n\t\t\tbehavior, you can get the identifier using\n\t\t\tListOriginRequestPolicies
.
Gets an origin request policy configuration.
\nTo get an origin request policy configuration, you must provide the policy's\n\t\t\tidentifier. If the origin request policy is attached to a distribution's cache behavior,\n\t\t\tyou can get the policy's identifier using ListDistributions
or\n\t\t\t\tGetDistribution
. If the origin request policy is not attached to a\n\t\t\tcache behavior, you can get the identifier using\n\t\t\tListOriginRequestPolicies
.
Gets an origin request policy configuration.
\n\t\tTo get an origin request policy configuration, you must provide the policy’s\n\t\t\tidentifier. If the origin request policy is attached to a distribution’s cache behavior,\n\t\t\tyou can get the policy’s identifier using ListDistributions
or\n\t\t\t\tGetDistribution
. If the origin request policy is not attached to a\n\t\t\tcache behavior, you can get the identifier using\n\t\t\tListOriginRequestPolicies
.
The unique identifier for the origin request policy. If the origin request policy is\n\t\t\tattached to a distribution's cache behavior, you can get the policy's identifier using\n\t\t\t\tListDistributions
or GetDistribution
. If the origin\n\t\t\trequest policy is not attached to a cache behavior, you can get the identifier using\n\t\t\t\tListOriginRequestPolicies
.
The unique identifier for the origin request policy. If the origin request policy is\n\t\t\tattached to a distribution’s cache behavior, you can get the policy’s identifier using\n\t\t\t\tListDistributions
or GetDistribution
. If the origin\n\t\t\trequest policy is not attached to a cache behavior, you can get the identifier using\n\t\t\t\tListOriginRequestPolicies
.
The unique identifier for the origin request policy. If the origin request policy is\n\t\t\tattached to a distribution's cache behavior, you can get the policy's identifier using\n\t\t\t\tListDistributions
or GetDistribution
. If the origin\n\t\t\trequest policy is not attached to a cache behavior, you can get the identifier using\n\t\t\t\tListOriginRequestPolicies
.
The unique identifier for the origin request policy. If the origin request policy is\n\t\t\tattached to a distribution’s cache behavior, you can get the policy’s identifier using\n\t\t\t\tListDistributions
or GetDistribution
. If the origin\n\t\t\trequest policy is not attached to a cache behavior, you can get the identifier using\n\t\t\t\tListOriginRequestPolicies
.
Gets a real-time log configuration.
\nTo get a real-time log configuration, you can provide the configuration's name or its\n\t\t\tAmazon Resource Name (ARN). You must provide at least one. If you provide both, CloudFront\n\t\t\tuses the name to identify the real-time log configuration to get.
", + "smithy.api#documentation": "Gets a real-time log configuration.
\n\t\tTo get a real-time log configuration, you can provide the configuration’s name or its\n\t\t\tAmazon Resource Name (ARN). You must provide at least one. If you provide both, CloudFront\n\t\t\tuses the name to identify the real-time log configuration to get.
", "smithy.api#http": { "method": "POST", "uri": "/2020-05-31/get-realtime-log-config", @@ -8800,7 +8800,7 @@ } ], "traits": { - "smithy.api#documentation": "Gets a response headers policy, including metadata (the policy's identifier and the\n\t\t\tdate and time when the policy was last modified).
\nTo get a response headers policy, you must provide the policy's identifier. If the\n\t\t\tresponse headers policy is attached to a distribution's cache behavior, you can get the\n\t\t\tpolicy's identifier using ListDistributions
or\n\t\t\tGetDistribution
. If the response headers policy is not attached to a cache\n\t\t\tbehavior, you can get the identifier using\n\t\t\tListResponseHeadersPolicies
.
Gets a response headers policy, including metadata (the policy’s identifier and the\n\t\t\tdate and time when the policy was last modified).
\n\t\tTo get a response headers policy, you must provide the policy’s identifier. If the\n\t\t\tresponse headers policy is attached to a distribution’s cache behavior, you can get the\n\t\t\tpolicy’s identifier using ListDistributions
or\n\t\t\tGetDistribution
. If the response headers policy is not attached to a cache\n\t\t\tbehavior, you can get the identifier using\n\t\t\tListResponseHeadersPolicies
.
Gets a response headers policy configuration.
\nTo get a response headers policy configuration, you must provide the policy's\n\t\t\tidentifier. If the response headers policy is attached to a distribution's cache\n\t\t\tbehavior, you can get the policy's identifier using ListDistributions
or\n\t\t\t\tGetDistribution
. If the response headers policy is not attached to a\n\t\t\tcache behavior, you can get the identifier using\n\t\t\t\tListResponseHeadersPolicies
.
Gets a response headers policy configuration.
\n\t\tTo get a response headers policy configuration, you must provide the policy’s\n\t\t\tidentifier. If the response headers policy is attached to a distribution’s cache\n\t\t\tbehavior, you can get the policy’s identifier using ListDistributions
or\n\t\t\t\tGetDistribution
. If the response headers policy is not attached to a\n\t\t\tcache behavior, you can get the identifier using\n\t\t\t\tListResponseHeadersPolicies
.
The identifier for the response headers policy.
\nIf the response headers policy is attached to a distribution's cache behavior, you can\n\t\t\tget the policy's identifier using ListDistributions
or\n\t\t\t\tGetDistribution
. If the response headers policy is not attached to a\n\t\t\tcache behavior, you can get the identifier using\n\t\t\t\tListResponseHeadersPolicies
.
The identifier for the response headers policy.
\n\t\tIf the response headers policy is attached to a distribution’s cache behavior, you can\n\t\t\tget the policy’s identifier using ListDistributions
or\n\t\t\t\tGetDistribution
. If the response headers policy is not attached to a\n\t\t\tcache behavior, you can get the identifier using\n\t\t\t\tListResponseHeadersPolicies
.
The identifier for the response headers policy.
\nIf the response headers policy is attached to a distribution's cache behavior, you can\n\t\t\tget the policy's identifier using ListDistributions
or\n\t\t\t\tGetDistribution
. If the response headers policy is not attached to a\n\t\t\tcache behavior, you can get the identifier using\n\t\t\t\tListResponseHeadersPolicies
.
The identifier for the response headers policy.
\n\t\tIf the response headers policy is attached to a distribution’s cache behavior, you can\n\t\t\tget the policy’s identifier using ListDistributions
or\n\t\t\t\tGetDistribution
. If the response headers policy is not attached to a\n\t\t\tcache behavior, you can get the identifier using\n\t\t\t\tListResponseHeadersPolicies
.
Get the configuration information about a streaming distribution.
", + "smithy.api#documentation": "Get the configuration information about a streaming distribution.
", "smithy.api#http": { "method": "GET", "uri": "/2020-05-31/streaming-distribution/{Id}/config", @@ -9546,20 +9546,20 @@ "CreateTime": { "target": "com.amazonaws.cloudfront#timestamp", "traits": { - "smithy.api#documentation": "The date and time the invalidation request was first made.
", + "smithy.api#documentation": "The date and time the invalidation request was first made.
", "smithy.api#required": {} } }, "InvalidationBatch": { "target": "com.amazonaws.cloudfront#InvalidationBatch", "traits": { - "smithy.api#documentation": "The current invalidation information for the batch request.
", + "smithy.api#documentation": "The current invalidation information for the batch request.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "An invalidation.
" + "smithy.api#documentation": "An invalidation.
" } }, "com.amazonaws.cloudfront#InvalidationBatch": { @@ -9568,14 +9568,14 @@ "Paths": { "target": "com.amazonaws.cloudfront#Paths", "traits": { - "smithy.api#documentation": "A complex type that contains information about the objects that you want to\n\t\t\tinvalidate. For more information, see Specifying the Objects to Invalidate in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", + "smithy.api#documentation": "A complex type that contains information about the objects that you want to\n\t\t\tinvalidate. For more information, see Specifying the Objects to Invalidate in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", "smithy.api#required": {} } }, "CallerReference": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "A value that you specify to uniquely identify an invalidation request. CloudFront uses the\n\t\t\tvalue to prevent you from accidentally resubmitting an identical request. Whenever you\n\t\t\tcreate a new invalidation request, you must specify a new value for\n\t\t\t\tCallerReference
and change other values in the request as applicable.\n\t\t\tOne way to ensure that the value of CallerReference
is unique is to use a\n\t\t\t\ttimestamp
, for example, 20120301090000
.
If you make a second invalidation request with the same value for\n\t\t\t\tCallerReference
, and if the rest of the request is the same, CloudFront\n\t\t\tdoesn't create a new invalidation request. Instead, CloudFront returns information about the\n\t\t\tinvalidation request that you previously created with the same\n\t\t\t\tCallerReference
.
If CallerReference
is a value you already sent in a previous invalidation\n\t\t\tbatch request but the content of any Path
is different from the original\n\t\t\trequest, CloudFront returns an InvalidationBatchAlreadyExists
error.
A value that you specify to uniquely identify an invalidation request. CloudFront uses the\n\t\t\tvalue to prevent you from accidentally resubmitting an identical request. Whenever you\n\t\t\tcreate a new invalidation request, you must specify a new value for\n\t\t\t\tCallerReference
and change other values in the request as applicable.\n\t\t\tOne way to ensure that the value of CallerReference
is unique is to use a\n\t\t\t\ttimestamp
, for example, 20120301090000
.
If you make a second invalidation request with the same value for\n\t\t\t\tCallerReference
, and if the rest of the request is the same, CloudFront\n\t\t\tdoesn't create a new invalidation request. Instead, CloudFront returns information about the\n\t\t\tinvalidation request that you previously created with the same\n\t\t\t\tCallerReference
.
If CallerReference
is a value you already sent in a previous invalidation\n\t\t\tbatch request but the content of any Path
is different from the original\n\t\t\trequest, CloudFront returns an InvalidationBatchAlreadyExists
error.
A key group.
\nA key group contains a list of public keys that you can use with CloudFront signed URLs and signed cookies.
" + "smithy.api#documentation": "A key group.
\n\t\tA key group contains a list of public keys that you can use with CloudFront signed URLs and signed cookies.
" } }, "com.amazonaws.cloudfront#KeyGroupAlreadyExists": { @@ -9786,7 +9786,7 @@ } }, "traits": { - "smithy.api#documentation": "A key group configuration.
\nA key group contains a list of public keys that you can use with CloudFront signed URLs and signed cookies.
" + "smithy.api#documentation": "A key group configuration.
\n\t\tA key group contains a list of public keys that you can use with CloudFront signed URLs and signed cookies.
" } }, "com.amazonaws.cloudfront#KeyGroupList": { @@ -9883,7 +9883,7 @@ "RoleARN": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFront can use to\n\t\t\tsend real-time log data to your Kinesis data stream.
\nFor more information the IAM role, see Real-time log configuration IAM role in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of an Identity and Access Management (IAM) role that CloudFront can use to\n\t\t\tsend real-time log data to your Kinesis data stream.
\n\t\tFor more information the IAM role, see Real-time log configuration IAM role in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", "smithy.api#required": {} } }, @@ -9915,7 +9915,7 @@ "EventType": { "target": "com.amazonaws.cloudfront#EventType", "traits": { - "smithy.api#documentation": "Specifies the event type that triggers a Lambda@Edge function invocation. You can\n\t\t\tspecify the following values:
\n\n viewer-request
: The function executes when CloudFront receives a\n\t\t\t\t\trequest from a viewer and before it checks to see whether the requested object\n\t\t\t\t\tis in the edge cache.
\n origin-request
: The function executes only when CloudFront sends a\n\t\t\t\t\trequest to your origin. When the requested object is in the edge cache, the\n\t\t\t\t\tfunction doesn't execute.
\n origin-response
: The function executes after CloudFront receives a\n\t\t\t\t\tresponse from the origin and before it caches the object in the response. When\n\t\t\t\t\tthe requested object is in the edge cache, the function doesn't execute.
\n viewer-response
: The function executes before CloudFront returns the\n\t\t\t\t\trequested object to the viewer. The function executes regardless of whether the\n\t\t\t\t\tobject was already in the edge cache.
If the origin returns an HTTP status code other than HTTP 200 (OK), the\n\t\t\t\t\tfunction doesn't execute.
\nSpecifies the event type that triggers a Lambda@Edge function invocation. You can\n\t\t\tspecify the following values:
\n\t\t\n viewer-request
: The function executes when CloudFront receives a\n\t\t\t\t\trequest from a viewer and before it checks to see whether the requested object\n\t\t\t\t\tis in the edge cache.
\n origin-request
: The function executes only when CloudFront sends a\n\t\t\t\t\trequest to your origin. When the requested object is in the edge cache, the\n\t\t\t\t\tfunction doesn't execute.
\n origin-response
: The function executes after CloudFront receives a\n\t\t\t\t\tresponse from the origin and before it caches the object in the response. When\n\t\t\t\t\tthe requested object is in the edge cache, the function doesn't execute.
\n viewer-response
: The function executes before CloudFront returns the\n\t\t\t\t\trequested object to the viewer. The function executes regardless of whether the\n\t\t\t\t\tobject was already in the edge cache.
If the origin returns an HTTP status code other than HTTP 200 (OK), the\n\t\t\t\t\tfunction doesn't execute.
\n\t\t\tA complex type that specifies a list of Lambda@Edge functions associations for a cache\n\t\t\tbehavior.
\nIf you want to invoke one or more Lambda@Edge functions triggered by requests that\n\t\t\tmatch the PathPattern
of the cache behavior, specify the applicable values\n\t\t\tfor Quantity
and Items
. Note that there can be up to 4\n\t\t\t\tLambdaFunctionAssociation
items in this list (one for each possible\n\t\t\tvalue of EventType
) and each EventType
can be associated with\n\t\t\tonly one function.
If you don't want to invoke any Lambda@Edge functions for the requests that match\n\t\t\t\tPathPattern
, specify 0
for Quantity
and omit\n\t\t\t\tItems
.
A complex type that specifies a list of Lambda@Edge functions associations for a cache\n\t\t\tbehavior.
\n\n\t\tIf you want to invoke one or more Lambda@Edge functions triggered by requests that\n\t\t\tmatch the PathPattern
of the cache behavior, specify the applicable values\n\t\t\tfor Quantity
and Items
. Note that there can be up to 4\n\t\t\t\tLambdaFunctionAssociation
items in this list (one for each possible\n\t\t\tvalue of EventType
) and each EventType
can be associated with\n\t\t\tonly one function.
If you don't want to invoke any Lambda@Edge functions for the requests that match\n\t\t\t\tPathPattern
, specify 0
for Quantity
and omit\n\t\t\t\tItems
.
Gets a list of cache policies.
\nYou can optionally apply a filter to return only the managed policies created by\n\t\t\tAmazon Web Services, or only the custom policies created in your Amazon Web Services account.
\nYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Gets a list of cache policies.
\n\t\tYou can optionally apply a filter to return only the managed policies created by\n\t\t\tAmazon Web Services, or only the custom policies created in your Amazon Web Services account.
\n\t\tYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
A filter to return only the specified kinds of cache policies. Valid values\n\t\t\tare:
\n\n managed
– Returns only the managed policies created by\n\t\t\t\t\tAmazon Web Services.
\n custom
– Returns only the custom policies created in your\n\t\t\t\t\tAmazon Web Services account.
A filter to return only the specified kinds of cache policies. Valid values\n\t\t\tare:
\n\t\t\n managed
– Returns only the managed policies created by\n\t\t\t\t\tAmazon Web Services.
\n custom
– Returns only the custom policies created in your\n\t\t\t\t\tAmazon Web Services account.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\tcache policies. The response includes cache policies in the list that occur after the\n\t\t\tmarker. To get the next page of the list, set this field's value to the value of\n\t\t\t\tNextMarker
from the current page's response.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\tcache policies. The response includes cache policies in the list that occur after the\n\t\t\tmarker. To get the next page of the list, set this field’s value to the value of\n\t\t\t\tNextMarker
from the current page’s response.
The maximum number of origin access identities you want in the response body.
", + "smithy.api#documentation": "The maximum number of origin access identities you want in the response body.
", "smithy.api#httpQuery": "MaxItems" } } }, "traits": { - "smithy.api#documentation": "The request to list origin access identities.
" + "smithy.api#documentation": "The request to list origin access identities.
" } }, "com.amazonaws.cloudfront#ListCloudFrontOriginAccessIdentitiesResult": { @@ -10082,13 +10082,13 @@ "CloudFrontOriginAccessIdentityList": { "target": "com.amazonaws.cloudfront#CloudFrontOriginAccessIdentityList", "traits": { - "smithy.api#documentation": "The CloudFrontOriginAccessIdentityList
type.
The CloudFrontOriginAccessIdentityList
type.
The returned result of the corresponding request.
" + "smithy.api#documentation": "The returned result of the corresponding request.
" } }, "com.amazonaws.cloudfront#ListConflictingAliases": { @@ -10108,7 +10108,7 @@ } ], "traits": { - "smithy.api#documentation": "Gets a list of aliases (also called CNAMEs or alternate domain names) that conflict or\n\t\t\toverlap with the provided alias, and the associated CloudFront distributions and Amazon Web Services\n\t\t\taccounts for each conflicting alias. In the returned list, the distribution and account\n\t\t\tIDs are partially hidden, which allows you to identify the distributions and accounts\n\t\t\tthat you own, but helps to protect the information of ones that you don't own.
\nUse this operation to find aliases that are in use in CloudFront that conflict or overlap\n\t\t\twith the provided alias. For example, if you provide www.example.com
as\n\t\t\tinput, the returned list can include www.example.com
and the overlapping\n\t\t\twildcard alternate domain name (*.example.com
), if they exist. If you\n\t\t\tprovide *.example.com
as input, the returned list can include\n\t\t\t\t*.example.com
and any alternate domain names covered by that wildcard\n\t\t\t(for example, www.example.com
, test.example.com
,\n\t\t\t\tdev.example.com
, and so on), if they exist.
To list conflicting aliases, you provide the alias to search and the ID of a\n\t\t\tdistribution in your account that has an attached SSL/TLS certificate that includes the\n\t\t\tprovided alias. For more information, including how to set up the distribution and\n\t\t\tcertificate, see Moving an alternate domain name to a different\n\t\t\t\tdistribution in the Amazon CloudFront Developer Guide.
\nYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Gets a list of aliases (also called CNAMEs or alternate domain names) that conflict or\n\t\t\toverlap with the provided alias, and the associated CloudFront distributions and Amazon Web Services\n\t\t\taccounts for each conflicting alias. In the returned list, the distribution and account\n\t\t\tIDs are partially hidden, which allows you to identify the distributions and accounts\n\t\t\tthat you own, but helps to protect the information of ones that you don’t own.
\n\t\tUse this operation to find aliases that are in use in CloudFront that conflict or overlap\n\t\t\twith the provided alias. For example, if you provide www.example.com
as\n\t\t\tinput, the returned list can include www.example.com
and the overlapping\n\t\t\twildcard alternate domain name (*.example.com
), if they exist. If you\n\t\t\tprovide *.example.com
as input, the returned list can include\n\t\t\t\t*.example.com
and any alternate domain names covered by that wildcard\n\t\t\t(for example, www.example.com
, test.example.com
,\n\t\t\t\tdev.example.com
, and so on), if they exist.
To list conflicting aliases, you provide the alias to search and the ID of a\n\t\t\tdistribution in your account that has an attached SSL/TLS certificate that includes the\n\t\t\tprovided alias. For more information, including how to set up the distribution and\n\t\t\tcertificate, see Moving an alternate domain name to a different\n\t\t\t\tdistribution in the Amazon CloudFront Developer Guide.
\n\t\tYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Use this field when paginating results to indicate where to begin in the list of\n\t\t\tconflicting aliases. The response includes conflicting aliases in the list that occur\n\t\t\tafter the marker. To get the next page of the list, set this field's value to the value\n\t\t\tof NextMarker
from the current page's response.
Use this field when paginating results to indicate where to begin in the list of\n\t\t\tconflicting aliases. The response includes conflicting aliases in the list that occur\n\t\t\tafter the marker. To get the next page of the list, set this field’s value to the value\n\t\t\tof NextMarker
from the current page’s response.
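A minimal sketch of the ListConflictingAliases flow described above, using the AWS SDK for JavaScript v3 CloudFront client for illustration; the alias and distribution ID are placeholders for the domain being investigated and a distribution in your account whose certificate covers it:

```typescript
import {
  CloudFrontClient,
  ListConflictingAliasesCommand,
} from "@aws-sdk/client-cloudfront";

const client = new CloudFrontClient({ region: "us-east-1" });

async function findConflicts(): Promise<void> {
  let marker: string | undefined;
  do {
    const response = await client.send(
      new ListConflictingAliasesCommand({
        DistributionId: "EDFDVBD6EXAMPLE", // placeholder distribution ID
        Alias: "www.example.com",          // placeholder alias to search for
        Marker: marker,
      })
    );
    for (const item of response.ConflictingAliasesList?.Items ?? []) {
      // Account and distribution IDs come back partially hidden, as noted above.
      console.log(item.Alias, item.DistributionId, item.AccountId);
    }
    marker = response.ConflictingAliasesList?.NextMarker;
  } while (marker);
}

findConflicts().catch(console.error);
```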
Gets a list of the continuous deployment policies in your Amazon Web Services account.
\nYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Gets a list of the continuous deployment policies in your Amazon Web Services account.
\n\t\tYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\tcontinuous deployment policies. The response includes policies in the list that occur\n\t\t\tafter the marker. To get the next page of the list, set this field's value to the value\n\t\t\tof NextMarker
from the current page's response.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\tcontinuous deployment policies. The response includes policies in the list that occur\n\t\t\tafter the marker. To get the next page of the list, set this field’s value to the value\n\t\t\tof NextMarker
from the current page’s response.
Gets a list of distribution IDs for distributions that have a cache behavior that's\n\t\t\tassociated with the specified cache policy.
\nYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Gets a list of distribution IDs for distributions that have a cache behavior that’s\n\t\t\tassociated with the specified cache policy.
\n\t\tYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\tdistribution IDs. The response includes distribution IDs in the list that occur after\n\t\t\tthe marker. To get the next page of the list, set this field's value to the value of\n\t\t\t\tNextMarker
from the current page's response.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\tdistribution IDs. The response includes distribution IDs in the list that occur after\n\t\t\tthe marker. To get the next page of the list, set this field’s value to the value of\n\t\t\t\tNextMarker
from the current page’s response.
Gets a list of distribution IDs for distributions that have a cache behavior that\n\t\t\treferences the specified key group.
\nYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Gets a list of distribution IDs for distributions that have a cache behavior that\n\t\t\treferences the specified key group.
\n\t\tYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\tdistribution IDs. The response includes distribution IDs in the list that occur after\n\t\t\tthe marker. To get the next page of the list, set this field's value to the value of\n\t\t\t\tNextMarker
from the current page's response.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\tdistribution IDs. The response includes distribution IDs in the list that occur after\n\t\t\tthe marker. To get the next page of the list, set this field’s value to the value of\n\t\t\t\tNextMarker
from the current page’s response.
Gets a list of distribution IDs for distributions that have a cache behavior that's\n\t\t\tassociated with the specified origin request policy.
\nYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Gets a list of distribution IDs for distributions that have a cache behavior that’s\n\t\t\tassociated with the specified origin request policy.
\n\t\tYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\tdistribution IDs. The response includes distribution IDs in the list that occur after\n\t\t\tthe marker. To get the next page of the list, set this field's value to the value of\n\t\t\t\tNextMarker
from the current page's response.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\tdistribution IDs. The response includes distribution IDs in the list that occur after\n\t\t\tthe marker. To get the next page of the list, set this field’s value to the value of\n\t\t\t\tNextMarker
from the current page’s response.
Gets a list of distributions that have a cache behavior that's associated with the\n\t\t\tspecified real-time log configuration.
\nYou can specify the real-time log configuration by its name or its Amazon Resource\n\t\t\tName (ARN). You must provide at least one. If you provide both, CloudFront uses the name to\n\t\t\tidentify the real-time log configuration to list distributions for.
\nYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Gets a list of distributions that have a cache behavior that’s associated with the\n\t\t\tspecified real-time log configuration.
\n\t\tYou can specify the real-time log configuration by its name or its Amazon Resource\n\t\t\tName (ARN). You must provide at least one. If you provide both, CloudFront uses the name to\n\t\t\tidentify the real-time log configuration to list distributions for.
\n\t\tYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\tdistributions. The response includes distributions in the list that occur after the\n\t\t\tmarker. To get the next page of the list, set this field's value to the value of\n\t\t\t\tNextMarker
from the current page's response.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\tdistributions. The response includes distributions in the list that occur after the\n\t\t\tmarker. To get the next page of the list, set this field’s value to the value of\n\t\t\t\tNextMarker
from the current page’s response.
Gets a list of distribution IDs for distributions that have a cache behavior that's\n\t\t\tassociated with the specified response headers policy.
\nYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Gets a list of distribution IDs for distributions that have a cache behavior that’s\n\t\t\tassociated with the specified response headers policy.
\n\t\tYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\tdistribution IDs. The response includes distribution IDs in the list that occur after\n\t\t\tthe marker. To get the next page of the list, set this field's value to the value of\n\t\t\t\tNextMarker
from the current page's response.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\tdistribution IDs. The response includes distribution IDs in the list that occur after\n\t\t\tthe marker. To get the next page of the list, set this field’s value to the value of\n\t\t\t\tNextMarker
from the current page’s response.
Use Marker
and MaxItems
to control pagination of results. If\n\t\t\tyou have more than MaxItems
distributions that satisfy the request, the\n\t\t\tresponse includes a NextMarker
element. To get the next page of results,\n\t\t\tsubmit another request. For the value of Marker
, specify the value of\n\t\t\t\tNextMarker
from the last response. (For the first request, omit\n\t\t\t\tMarker
.)
Use Marker
and MaxItems
to control pagination of results. If\n\t\t\tyou have more than MaxItems
distributions that satisfy the request, the\n\t\t\tresponse includes a NextMarker
element. To get the next page of results,\n\t\t\tsubmit another request. For the value of Marker
, specify the value of\n\t\t\t\tNextMarker
from the last response. (For the first request, omit\n\t\t\t\tMarker
.)
The DistributionList
type.
The DistributionList
type.
The request to list your distributions.
" + "smithy.api#documentation": "The request to list your distributions.
" } }, "com.amazonaws.cloudfront#ListDistributionsResult": { @@ -10673,13 +10673,13 @@ "DistributionList": { "target": "com.amazonaws.cloudfront#DistributionList", "traits": { - "smithy.api#documentation": "The DistributionList
type.
The DistributionList
type.
The returned result of the corresponding request.
" + "smithy.api#documentation": "The returned result of the corresponding request.
" } }, "com.amazonaws.cloudfront#ListFieldLevelEncryptionConfigs": { @@ -10710,14 +10710,14 @@ "Marker": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "Use this when paginating results to indicate where to begin in your list of\n\t\t\tconfigurations. The results include configurations in the list that occur after the\n\t\t\tmarker. To get the next page of results, set the Marker
to the value of the\n\t\t\t\tNextMarker
from the current page's response (which is also the ID of\n\t\t\tthe last configuration on that page).
Use this when paginating results to indicate where to begin in your list of\n\t\t\tconfigurations. The results include configurations in the list that occur after the\n\t\t\tmarker. To get the next page of results, set the Marker
to the value of the\n\t\t\t\tNextMarker
from the current page's response (which is also the ID of\n\t\t\tthe last configuration on that page).
The maximum number of field-level encryption configurations you want in the response\n\t\t\tbody.
", + "smithy.api#documentation": "The maximum number of field-level encryption configurations you want in the response\n\t\t\tbody.
", "smithy.api#httpQuery": "MaxItems" } } @@ -10763,7 +10763,7 @@ "Marker": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "Use this when paginating results to indicate where to begin in your list of profiles.\n\t\t\tThe results include profiles in the list that occur after the marker. To get the next\n\t\t\tpage of results, set the Marker
to the value of the NextMarker
\n\t\t\tfrom the current page's response (which is also the ID of the last profile on that\n\t\t\tpage).
Use this when paginating results to indicate where to begin in your list of profiles.\n\t\t\tThe results include profiles in the list that occur after the marker. To get the next\n\t\t\tpage of results, set the Marker
to the value of the NextMarker
\n\t\t\tfrom the current page's response (which is also the ID of the last profile on that\n\t\t\tpage).
Gets a list of all CloudFront functions in your Amazon Web Services account.
\nYou can optionally apply a filter to return only the functions that are in the\n\t\t\tspecified stage, either DEVELOPMENT
or LIVE
.
You can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Gets a list of all CloudFront functions in your Amazon Web Services account.
\n\t\tYou can optionally apply a filter to return only the functions that are in the\n\t\t\tspecified stage, either DEVELOPMENT
or LIVE
.
You can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\tfunctions. The response includes functions in the list that occur after the marker. To\n\t\t\tget the next page of the list, set this field's value to the value of\n\t\t\t\tNextMarker
from the current page's response.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\tfunctions. The response includes functions in the list that occur after the marker. To\n\t\t\tget the next page of the list, set this field’s value to the value of\n\t\t\t\tNextMarker
from the current page’s response.
Lists invalidation batches.
", + "smithy.api#documentation": "Lists invalidation batches.
", "smithy.api#http": { "method": "GET", "uri": "/2020-05-31/distribution/{DistributionId}/invalidation", @@ -10899,7 +10899,7 @@ "Marker": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "Use this parameter when paginating results to indicate where to begin in your list of\n\t\t\tinvalidation batches. Because the results are returned in decreasing order from most\n\t\t\trecent to oldest, the most recent results are on the first page, the second page will\n\t\t\tcontain earlier results, and so on. To get the next page of results, set\n\t\t\t\tMarker
to the value of the NextMarker
from the current\n\t\t\tpage's response. This value is the same as the ID of the last invalidation batch on that\n\t\t\tpage.
Use this parameter when paginating results to indicate where to begin in your list of\n\t\t\tinvalidation batches. Because the results are returned in decreasing order from most\n\t\t\trecent to oldest, the most recent results are on the first page, the second page will\n\t\t\tcontain earlier results, and so on. To get the next page of results, set\n\t\t\t\tMarker
to the value of the NextMarker
from the current\n\t\t\tpage's response. This value is the same as the ID of the last invalidation batch on that\n\t\t\tpage.
The request to list invalidations.
" + "smithy.api#documentation": "The request to list invalidations.
" } }, "com.amazonaws.cloudfront#ListInvalidationsResult": { @@ -10921,13 +10921,13 @@ "InvalidationList": { "target": "com.amazonaws.cloudfront#InvalidationList", "traits": { - "smithy.api#documentation": "Information about invalidation batches.
", + "smithy.api#documentation": "Information about invalidation batches.
", "smithy.api#httpPayload": {} } } }, "traits": { - "smithy.api#documentation": "The returned result of the corresponding request.
" + "smithy.api#documentation": "The returned result of the corresponding request.
" } }, "com.amazonaws.cloudfront#ListKeyGroups": { @@ -10944,7 +10944,7 @@ } ], "traits": { - "smithy.api#documentation": "Gets a list of key groups.
\nYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Gets a list of key groups.
\n\t\tYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Use this field when paginating results to indicate where to begin in your list of key\n\t\t\tgroups. The response includes key groups in the list that occur after the marker. To get\n\t\t\tthe next page of the list, set this field's value to the value of\n\t\t\t\tNextMarker
from the current page's response.
Use this field when paginating results to indicate where to begin in your list of key\n\t\t\tgroups. The response includes key groups in the list that occur after the marker. To get\n\t\t\tthe next page of the list, set this field’s value to the value of\n\t\t\t\tNextMarker
from the current page’s response.
Gets the list of CloudFront origin access controls in this Amazon Web Services account.
\nYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send another\n\t\t\trequest that specifies the NextMarker
value from the current response as\n\t\t\tthe Marker
value in the next request.
Gets the list of CloudFront origin access controls in this Amazon Web Services account.
\n\t\tYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send another\n\t\t\trequest that specifies the NextMarker
value from the current response as\n\t\t\tthe Marker
value in the next request.
Gets a list of origin request policies.
\nYou can optionally apply a filter to return only the managed policies created by\n\t\t\tAmazon Web Services, or only the custom policies created in your Amazon Web Services account.
\nYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Gets a list of origin request policies.
\n\t\tYou can optionally apply a filter to return only the managed policies created by\n\t\t\tAmazon Web Services, or only the custom policies created in your Amazon Web Services account.
\n\t\tYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
A filter to return only the specified kinds of origin request policies. Valid values\n\t\t\tare:
\n\n managed
– Returns only the managed policies created by\n\t\t\t\t\tAmazon Web Services.
\n custom
– Returns only the custom policies created in your\n\t\t\t\t\tAmazon Web Services account.
A filter to return only the specified kinds of origin request policies. Valid values\n\t\t\tare:
\n\t\t\n managed
– Returns only the managed policies created by\n\t\t\t\t\tAmazon Web Services.
\n custom
– Returns only the custom policies created in your\n\t\t\t\t\tAmazon Web Services account.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\torigin request policies. The response includes origin request policies in the list that\n\t\t\toccur after the marker. To get the next page of the list, set this field's value to the\n\t\t\tvalue of NextMarker
from the current page's response.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\torigin request policies. The response includes origin request policies in the list that\n\t\t\toccur after the marker. To get the next page of the list, set this field’s value to the\n\t\t\tvalue of NextMarker
from the current page’s response.
Use this when paginating results to indicate where to begin in your list of public\n\t\t\tkeys. The results include public keys in the list that occur after the marker. To get\n\t\t\tthe next page of results, set the Marker
to the value of the\n\t\t\t\tNextMarker
from the current page's response (which is also the ID of\n\t\t\tthe last public key on that page).
Use this when paginating results to indicate where to begin in your list of public\n\t\t\tkeys. The results include public keys in the list that occur after the marker. To get\n\t\t\tthe next page of results, set the Marker
to the value of the\n\t\t\t\tNextMarker
from the current page's response (which is also the ID of\n\t\t\tthe last public key on that page).
The maximum number of public keys you want in the response body.
", + "smithy.api#documentation": "The maximum number of public keys you want in the response body.
", "smithy.api#httpQuery": "MaxItems" } } @@ -11175,7 +11175,7 @@ } ], "traits": { - "smithy.api#documentation": "Gets a list of real-time log configurations.
\nYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Gets a list of real-time log configurations.
\n\t\tYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\treal-time log configurations. The response includes real-time log configurations in the\n\t\t\tlist that occur after the marker. To get the next page of the list, set this field's\n\t\t\tvalue to the value of NextMarker
from the current page's response.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\treal-time log configurations. The response includes real-time log configurations in the\n\t\t\tlist that occur after the marker. To get the next page of the list, set this field’s\n\t\t\tvalue to the value of NextMarker
from the current page’s response.
Gets a list of response headers policies.
\nYou can optionally apply a filter to get only the managed policies created by Amazon Web Services,\n\t\t\tor only the custom policies created in your Amazon Web Services account.
\nYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
Gets a list of response headers policies.
\n\t\tYou can optionally apply a filter to get only the managed policies created by Amazon Web Services,\n\t\t\tor only the custom policies created in your Amazon Web Services account.
\n\t\tYou can optionally specify the maximum number of items to receive in the response. If\n\t\t\tthe total number of items in the list exceeds the maximum that you specify, or the\n\t\t\tdefault maximum, the response is paginated. To get the next page of items, send a\n\t\t\tsubsequent request that specifies the NextMarker
value from the current\n\t\t\tresponse as the Marker
value in the subsequent request.
A filter to get only the specified kind of response headers policies. Valid values\n\t\t\tare:
\n\n managed
– Gets only the managed policies created by Amazon Web Services.
\n custom
– Gets only the custom policies created in your\n\t\t\t\t\tAmazon Web Services account.
A filter to get only the specified kind of response headers policies. Valid values\n\t\t\tare:
\n\t\t\n managed
– Gets only the managed policies created by Amazon Web Services.
\n custom
– Gets only the custom policies created in your\n\t\t\t\t\tAmazon Web Services account.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\tresponse headers policies. The response includes response headers policies in the list\n\t\t\tthat occur after the marker. To get the next page of the list, set this field's value to\n\t\t\tthe value of NextMarker
from the current page's response.
Use this field when paginating results to indicate where to begin in your list of\n\t\t\tresponse headers policies. The response includes response headers policies in the list\n\t\t\tthat occur after the marker. To get the next page of the list, set this field’s value to\n\t\t\tthe value of NextMarker
from the current page’s response.
List streaming distributions.
", + "smithy.api#documentation": "List streaming distributions.
", "smithy.api#http": { "method": "GET", "uri": "/2020-05-31/streaming-distribution", @@ -11327,7 +11327,7 @@ } }, "traits": { - "smithy.api#documentation": "The request to list your streaming distributions.
" + "smithy.api#documentation": "The request to list your streaming distributions.
" } }, "com.amazonaws.cloudfront#ListStreamingDistributionsResult": { @@ -11336,13 +11336,13 @@ "StreamingDistributionList": { "target": "com.amazonaws.cloudfront#StreamingDistributionList", "traits": { - "smithy.api#documentation": "The StreamingDistributionList
type.
The StreamingDistributionList
type.
The returned result of the corresponding request.
" + "smithy.api#documentation": "The returned result of the corresponding request.
" } }, "com.amazonaws.cloudfront#ListTagsForResource": { @@ -11382,14 +11382,14 @@ "Resource": { "target": "com.amazonaws.cloudfront#ResourceARN", "traits": { - "smithy.api#documentation": "An ARN of a CloudFront resource.
", + "smithy.api#documentation": "An ARN of a CloudFront resource.
", "smithy.api#httpQuery": "Resource", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "The request to list tags for a CloudFront resource.
" + "smithy.api#documentation": "The request to list tags for a CloudFront resource.
" } }, "com.amazonaws.cloudfront#ListTagsForResourceResult": { @@ -11398,14 +11398,14 @@ "Tags": { "target": "com.amazonaws.cloudfront#Tags", "traits": { - "smithy.api#documentation": "A complex type that contains zero or more Tag
elements.
A complex type that contains zero or more Tag
elements.
The returned result of the corresponding request.
" + "smithy.api#documentation": "The returned result of the corresponding request.
" } }, "com.amazonaws.cloudfront#LocationList": { @@ -11630,7 +11630,7 @@ } }, "traits": { - "smithy.api#documentation": "The continuous deployment policy doesn't exist.
", + "smithy.api#documentation": "The continuous deployment policy doesn’t exist.
", "smithy.api#error": "client", "smithy.api#httpError": 404 } @@ -11721,7 +11721,7 @@ } }, "traits": { - "smithy.api#documentation": "No origin exists with the specified Origin Id
.
No origin exists with the specified Origin Id
.
A unique identifier for the origin. This value must be unique within the\n\t\t\tdistribution.
\nUse this value to specify the TargetOriginId
in a\n\t\t\t\tCacheBehavior
or DefaultCacheBehavior
.
A unique identifier for the origin. This value must be unique within the\n\t\t\tdistribution.
\n\t\tUse this value to specify the TargetOriginId
in a\n\t\t\t\tCacheBehavior
or DefaultCacheBehavior
.
The domain name for the origin.
\nFor more information, see Origin Domain Name in the Amazon CloudFront Developer Guide.
", + "smithy.api#documentation": "The domain name for the origin.
\n\t\tFor more information, see Origin Domain Name in the Amazon CloudFront Developer Guide.
", "smithy.api#required": {} } }, "OriginPath": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "An optional path that CloudFront appends to the origin domain name when CloudFront requests\n\t\t\tcontent from the origin.
\nFor more information, see Origin Path in the\n\t\t\tAmazon CloudFront Developer Guide.
" + "smithy.api#documentation": "An optional path that CloudFront appends to the origin domain name when CloudFront requests\n\t\t\tcontent from the origin.
\n\t\tFor more information, see Origin Path in the\n\t\t\tAmazon CloudFront Developer Guide.
" } }, "CustomHeaders": { "target": "com.amazonaws.cloudfront#CustomHeaders", "traits": { - "smithy.api#documentation": "A list of HTTP header names and values that CloudFront adds to the requests that it sends to\n\t\t\tthe origin.
\nFor more information, see Adding Custom Headers to Origin Requests in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" + "smithy.api#documentation": "A list of HTTP header names and values that CloudFront adds to the requests that it sends to\n\t\t\tthe origin.
\n\t\tFor more information, see Adding Custom Headers to Origin Requests in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" } }, "S3OriginConfig": { @@ -11861,30 +11861,30 @@ "ConnectionAttempts": { "target": "com.amazonaws.cloudfront#integer", "traits": { - "smithy.api#documentation": "The number of times that CloudFront attempts to connect to the origin. The minimum number is\n\t\t\t1, the maximum is 3, and the default (if you don't specify otherwise) is 3.
\nFor a custom origin (including an Amazon S3 bucket that's configured with static website\n\t\t\thosting), this value also specifies the number of times that CloudFront attempts to get a\n\t\t\tresponse from the origin, in the case of an Origin Response Timeout.
\nFor more information, see Origin Connection Attempts in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" + "smithy.api#documentation": "The number of times that CloudFront attempts to connect to the origin. The minimum number is\n\t\t\t1, the maximum is 3, and the default (if you don’t specify otherwise) is 3.
\n\t\tFor a custom origin (including an Amazon S3 bucket that’s configured with static website\n\t\t\thosting), this value also specifies the number of times that CloudFront attempts to get a\n\t\t\tresponse from the origin, in the case of an Origin Response Timeout.
\n\t\tFor more information, see Origin Connection Attempts in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" } }, "ConnectionTimeout": { "target": "com.amazonaws.cloudfront#integer", "traits": { - "smithy.api#documentation": "The number of seconds that CloudFront waits when trying to establish a connection to the\n\t\t\torigin. The minimum timeout is 1 second, the maximum is 10 seconds, and the default (if\n\t\t\tyou don't specify otherwise) is 10 seconds.
\nFor more information, see Origin Connection Timeout in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" + "smithy.api#documentation": "The number of seconds that CloudFront waits when trying to establish a connection to the\n\t\t\torigin. The minimum timeout is 1 second, the maximum is 10 seconds, and the default (if\n\t\t\tyou don’t specify otherwise) is 10 seconds.
\n\t\tFor more information, see Origin Connection Timeout in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" } }, "OriginShield": { "target": "com.amazonaws.cloudfront#OriginShield", "traits": { - "smithy.api#documentation": "CloudFront Origin Shield. Using Origin Shield can help reduce the load on your\n\t\t\torigin.
\nFor more information, see Using Origin Shield in the Amazon CloudFront Developer Guide.
" + "smithy.api#documentation": "CloudFront Origin Shield. Using Origin Shield can help reduce the load on your\n\t\t\torigin.
\n\t\tFor more information, see Using Origin Shield in the Amazon CloudFront Developer Guide.
" } }, "OriginAccessControlId": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The unique identifier of an origin access control for this origin.
\nFor more information, see Restricting access to an Amazon S3 origin in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" + "smithy.api#documentation": "The unique identifier of an origin access control for this origin.
\n\t\tFor more information, see Restricting access to an Amazon S3 origin in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" } } }, "traits": { - "smithy.api#documentation": "An origin.
\nAn origin is the location where content is stored, and from which CloudFront gets content to\n\t\t\tserve to viewers. To specify an origin:
\nUse S3OriginConfig
to specify an Amazon S3 bucket that is not\n\t\t\t\t\tconfigured with static website hosting.
Use CustomOriginConfig
to specify all other kinds of origins,\n\t\t\t\t\tincluding:
An Amazon S3 bucket that is configured with static website hosting
\nAn Elastic Load Balancing load balancer
\nAn AWS Elemental MediaPackage endpoint
\nAn AWS Elemental MediaStore container
\nAny other HTTP server, running on an Amazon EC2 instance or any other kind\n\t\t\t\t\t\t\tof host
\nFor the current maximum number of origins that you can specify per distribution, see\n\t\t\t\tGeneral Quotas on Web Distributions in the\n\t\t\t\tAmazon CloudFront Developer Guide (quotas were formerly referred to as\n\t\t\tlimits).
" + "smithy.api#documentation": "An origin.
\n\t\tAn origin is the location where content is stored, and from which CloudFront gets content to\n\t\t\tserve to viewers. To specify an origin:
\n\t\tUse S3OriginConfig
to specify an Amazon S3 bucket that is not\n\t\t\t\t\tconfigured with static website hosting.
Use CustomOriginConfig
to specify all other kinds of origins,\n\t\t\t\t\tincluding:
An Amazon S3 bucket that is configured with static website hosting
\n\t\t\t\t\tAn Elastic Load Balancing load balancer
\n\t\t\t\t\tAn AWS Elemental MediaPackage endpoint
\n\t\t\t\t\tAn AWS Elemental MediaStore container
\n\t\t\t\t\tAny other HTTP server, running on an Amazon EC2 instance or any other kind\n\t\t\t\t\t\t\tof host
\n\t\t\t\t\tFor the current maximum number of origins that you can specify per distribution, see\n\t\t\t\tGeneral Quotas on Web Distributions in the\n\t\t\t\tAmazon CloudFront Developer Guide (quotas were formerly referred to as\n\t\t\tlimits).
" } }, "com.amazonaws.cloudfront#OriginAccessControl": { @@ -11947,7 +11947,7 @@ "SigningBehavior": { "target": "com.amazonaws.cloudfront#OriginAccessControlSigningBehaviors", "traits": { - "smithy.api#documentation": "Specifies which requests CloudFront signs (adds authentication information to). Specify\n\t\t\t\talways
for the most common use case. For more information, see origin access control advanced settings in the\n\t\t\t\tAmazon CloudFront Developer Guide.
This field can have one of the following values:
\n\n always
– CloudFront signs all origin requests, overwriting the\n\t\t\t\t\t\tAuthorization
header from the viewer request if one\n\t\t\t\t\texists.
\n never
– CloudFront doesn't sign any origin requests. This value turns\n\t\t\t\t\toff origin access control for all origins in all distributions that use this\n\t\t\t\t\torigin access control.
\n no-override
– If the viewer request doesn't contain the\n\t\t\t\t\t\tAuthorization
header, then CloudFront signs the origin request. If\n\t\t\t\t\tthe viewer request contains the Authorization
header, then CloudFront\n\t\t\t\t\tdoesn't sign the origin request and instead passes along the\n\t\t\t\t\t\tAuthorization
header from the viewer request. WARNING: To pass along the Authorization
header\n\t\t\t\t\t\tfrom the viewer request, you must add the\n\t\t\t\t\t\t\tAuthorization
header to a cache policy for all cache behaviors that\n\t\t\t\t\t\tuse origins associated with this origin access control.\n
Specifies which requests CloudFront signs (adds authentication information to). Specify\n\t\t\t\talways
for the most common use case. For more information, see origin access control advanced settings in the\n\t\t\t\tAmazon CloudFront Developer Guide.
This field can have one of the following values:
\n\t\t\n always
– CloudFront signs all origin requests, overwriting the\n\t\t\t\t\t\tAuthorization
header from the viewer request if one\n\t\t\t\t\texists.
\n never
– CloudFront doesn't sign any origin requests. This value turns\n\t\t\t\t\toff origin access control for all origins in all distributions that use this\n\t\t\t\t\torigin access control.
\n no-override
– If the viewer request doesn't contain the\n\t\t\t\t\t\tAuthorization
header, then CloudFront signs the origin request. If\n\t\t\t\t\tthe viewer request contains the Authorization
header, then CloudFront\n\t\t\t\t\tdoesn't sign the origin request and instead passes along the\n\t\t\t\t\t\tAuthorization
header from the viewer request. WARNING: To pass along the Authorization
header\n\t\t\t\t\t\tfrom the viewer request, you must add the\n\t\t\t\t\t\t\tAuthorization
header to a cache policy for all cache behaviors that\n\t\t\t\t\t\tuse origins associated with this origin access control.\n
A value that specifies which requests CloudFront signs (adds authentication information to).\n\t\t\tThis field can have one of the following values:
\n\n never
– CloudFront doesn't sign any origin requests.
\n always
– CloudFront signs all origin requests, overwriting the\n\t\t\t\t\t\tAuthorization
header from the viewer request if\n\t\t\t\t\tnecessary.
\n no-override
– If the viewer request doesn't contain the\n\t\t\t\t\t\tAuthorization
header, CloudFront signs the origin request. If the\n\t\t\t\t\tviewer request contains the Authorization
header, CloudFront doesn't sign\n\t\t\t\t\tthe origin request, but instead passes along the Authorization
\n\t\t\t\t\theader that it received in the viewer request.
A value that specifies which requests CloudFront signs (adds authentication information to).\n\t\t\tThis field can have one of the following values:
\n\t\t\n never
– CloudFront doesn't sign any origin requests.
\n always
– CloudFront signs all origin requests, overwriting the\n\t\t\t\t\t\tAuthorization
header from the viewer request if\n\t\t\t\t\tnecessary.
\n no-override
– If the viewer request doesn't contain the\n\t\t\t\t\t\tAuthorization
header, CloudFront signs the origin request. If the\n\t\t\t\t\tviewer request contains the Authorization
header, CloudFront doesn't sign\n\t\t\t\t\tthe origin request, but instead passes along the Authorization
\n\t\t\t\t\theader that it received in the viewer request.
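Putting the signing-behavior values above into context, here is a minimal sketch that creates an origin access control for an Amazon S3 origin with the recommended always behavior, using the AWS SDK for JavaScript v3 CloudFront client for illustration; the name is a placeholder:

```typescript
import {
  CloudFrontClient,
  CreateOriginAccessControlCommand,
} from "@aws-sdk/client-cloudfront";

const client = new CloudFrontClient({ region: "us-east-1" });

async function createOac(): Promise<void> {
  const response = await client.send(
    new CreateOriginAccessControlCommand({
      OriginAccessControlConfig: {
        Name: "assets-bucket-oac", // placeholder name
        OriginAccessControlOriginType: "s3",
        SigningProtocol: "sigv4",
        // "always": CloudFront signs every origin request, overwriting any
        // Authorization header from the viewer, per the description above.
        SigningBehavior: "always",
      },
    })
  );
  console.log(response.OriginAccessControl?.Id);
}

createOac().catch(console.error);
```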
A complex type that contains HeaderName
and HeaderValue
\n\t\t\telements, if any, for this distribution.
A complex type that contains HeaderName
and HeaderValue
\n\t\t\telements, if any, for this distribution.
An origin request policy.
\nWhen it's attached to a cache behavior, the origin request policy determines the\n\t\t\tvalues that CloudFront includes in requests that it sends to the origin. Each request that\n\t\t\tCloudFront sends to the origin includes the following:
\nThe request body and the URL path (without the domain name) from the viewer\n\t\t\t\t\trequest.
\nThe headers that CloudFront automatically includes in every origin request,\n\t\t\t\t\tincluding Host
, User-Agent
, and\n\t\t\t\t\t\tX-Amz-Cf-Id
.
All HTTP headers, cookies, and URL query strings that are specified in the\n\t\t\t\t\tcache policy or the origin request policy. These can include items from the\n\t\t\t\t\tviewer request and, in the case of headers, additional ones that are added by\n\t\t\t\t\tCloudFront.
\nCloudFront sends a request when it can't find an object in its cache that matches the\n\t\t\trequest. If you want to send values to the origin and also include them in the cache\n\t\t\tkey, use CachePolicy
.
An origin request policy.
\n\t\tWhen it’s attached to a cache behavior, the origin request policy determines the\n\t\t\tvalues that CloudFront includes in requests that it sends to the origin. Each request that\n\t\t\tCloudFront sends to the origin includes the following:
\n\t\tThe request body and the URL path (without the domain name) from the viewer\n\t\t\t\t\trequest.
\n\t\t\tThe headers that CloudFront automatically includes in every origin request,\n\t\t\t\t\tincluding Host
, User-Agent
, and\n\t\t\t\t\t\tX-Amz-Cf-Id
.
All HTTP headers, cookies, and URL query strings that are specified in the\n\t\t\t\t\tcache policy or the origin request policy. These can include items from the\n\t\t\t\t\tviewer request and, in the case of headers, additional ones that are added by\n\t\t\t\t\tCloudFront.
\n\t\t\tCloudFront sends a request when it can’t find an object in its cache that matches the\n\t\t\trequest. If you want to send values to the origin and also include them in the cache\n\t\t\tkey, use CachePolicy
.
An origin request policy configuration.
\nThis configuration determines the values that CloudFront includes in requests that it sends\n\t\t\tto the origin. Each request that CloudFront sends to the origin includes the following:
\nThe request body and the URL path (without the domain name) from the viewer\n\t\t\t\t\trequest.
\nThe headers that CloudFront automatically includes in every origin request,\n\t\t\t\t\tincluding Host
, User-Agent
, and\n\t\t\t\t\t\tX-Amz-Cf-Id
.
All HTTP headers, cookies, and URL query strings that are specified in the\n\t\t\t\t\tcache policy or the origin request policy. These can include items from the\n\t\t\t\t\tviewer request and, in the case of headers, additional ones that are added by\n\t\t\t\t\tCloudFront.
\nCloudFront sends a request when it can't find an object in its cache that matches the\n\t\t\trequest. If you want to send values to the origin and also include them in the cache\n\t\t\tkey, use CachePolicy
.
An origin request policy configuration.
\n\t\tThis configuration determines the values that CloudFront includes in requests that it sends\n\t\t\tto the origin. Each request that CloudFront sends to the origin includes the following:
\n\t\tThe request body and the URL path (without the domain name) from the viewer\n\t\t\t\t\trequest.
\n\t\t\tThe headers that CloudFront automatically includes in every origin request,\n\t\t\t\t\tincluding Host
, User-Agent
, and\n\t\t\t\t\t\tX-Amz-Cf-Id
.
All HTTP headers, cookies, and URL query strings that are specified in the\n\t\t\t\t\tcache policy or the origin request policy. These can include items from the\n\t\t\t\t\tviewer request and, in the case of headers, additional ones that are added by\n\t\t\t\t\tCloudFront.
\n\t\t\tCloudFront sends a request when it can’t find an object in its cache that matches the\n\t\t\trequest. If you want to send values to the origin and also include them in the cache\n\t\t\tkey, use CachePolicy
.
Determines whether cookies in viewer requests are included in requests that CloudFront sends\n\t\t\tto the origin. Valid values are:
\n\n none
– Cookies in viewer requests are not included in requests\n\t\t\t\t\tthat CloudFront sends to the origin. Even when this field is set to none
,\n\t\t\t\t\tany cookies that are listed in a CachePolicy
\n are included in origin requests.
\n whitelist
– The cookies in viewer requests that are listed in the\n\t\t\t\t\t\tCookieNames
type are included in requests that CloudFront sends to\n\t\t\t\t\tthe origin.
\n all
– All cookies in viewer requests are included in requests\n\t\t\t\t\tthat CloudFront sends to the origin.
Determines whether cookies in viewer requests are included in requests that CloudFront sends\n\t\t\tto the origin. Valid values are:
\n\t\t\n none
– Cookies in viewer requests are not included in requests\n\t\t\t\t\tthat CloudFront sends to the origin. Even when this field is set to none
,\n\t\t\t\t\tany cookies that are listed in a CachePolicy
\n\t\t\t\t\t are included in origin requests.
\n whitelist
– The cookies in viewer requests that are listed in the\n\t\t\t\t\t\tCookieNames
type are included in requests that CloudFront sends to\n\t\t\t\t\tthe origin.
\n all
– All cookies in viewer requests are included in requests\n\t\t\t\t\tthat CloudFront sends to the origin.
Determines whether any HTTP headers are included in requests that CloudFront sends to the\n\t\t\torigin. Valid values are:
\n\n none
– HTTP headers are not included in requests that CloudFront sends\n\t\t\t\t\tto the origin. Even when this field is set to none
, any headers\n\t\t\t\t\tthat are listed in a CachePolicy
\n are included in origin requests.
\n whitelist
– The HTTP headers that are listed in the\n\t\t\t\t\t\tHeaders
type are included in requests that CloudFront sends to the\n\t\t\t\t\torigin.
\n allViewer
– All HTTP headers in viewer requests are included in\n\t\t\t\t\trequests that CloudFront sends to the origin.
\n allViewerAndWhitelistCloudFront
– All HTTP headers in viewer\n\t\t\t\t\trequests and the additional CloudFront headers that are listed in the\n\t\t\t\t\t\tHeaders
type are included in requests that CloudFront sends to the\n\t\t\t\t\torigin. The additional headers are added by CloudFront.
Determines whether any HTTP headers are included in requests that CloudFront sends to the\n\t\t\torigin. Valid values are:
\n\t\t\n none
– HTTP headers are not included in requests that CloudFront sends\n\t\t\t\t\tto the origin. Even when this field is set to none
, any headers\n\t\t\t\t\tthat are listed in a CachePolicy
\n\t\t\t\t\t are included in origin requests.
\n whitelist
– The HTTP headers that are listed in the\n\t\t\t\t\t\tHeaders
type are included in requests that CloudFront sends to the\n\t\t\t\t\torigin.
\n allViewer
– All HTTP headers in viewer requests are included in\n\t\t\t\t\trequests that CloudFront sends to the origin.
\n allViewerAndWhitelistCloudFront
– All HTTP headers in viewer\n\t\t\t\t\trequests and the additional CloudFront headers that are listed in the\n\t\t\t\t\t\tHeaders
type are included in requests that CloudFront sends to the\n\t\t\t\t\torigin. The additional headers are added by CloudFront.
Determines whether any URL query strings in viewer requests are included in requests\n\t\t\tthat CloudFront sends to the origin. Valid values are:
\n\n none
– Query strings in viewer requests are not included in\n\t\t\t\t\trequests that CloudFront sends to the origin. Even when this field is set to\n\t\t\t\t\t\tnone
, any query strings that are listed in a\n\t\t\t\t\t\tCachePolicy
\n are included in origin requests.
\n whitelist
– The query strings in viewer requests that are listed\n\t\t\t\t\tin the QueryStringNames
type are included in requests that CloudFront\n\t\t\t\t\tsends to the origin.
\n all
– All query strings in viewer requests are included in\n\t\t\t\t\trequests that CloudFront sends to the origin.
Determines whether any URL query strings in viewer requests are included in requests\n\t\t\tthat CloudFront sends to the origin. Valid values are:
\n\t\t\n none
– Query strings in viewer requests are not included in\n\t\t\t\t\trequests that CloudFront sends to the origin. Even when this field is set to\n\t\t\t\t\t\tnone
, any query strings that are listed in a\n\t\t\t\t\t\tCachePolicy
\n\t\t\t\t\t are included in origin requests.
\n whitelist
– The query strings in viewer requests that are listed\n\t\t\t\t\tin the QueryStringNames
type are included in requests that CloudFront\n\t\t\t\t\tsends to the origin.
\n all
– All query strings in viewer requests are included in\n\t\t\t\t\trequests that CloudFront sends to the origin.
A flag that specifies whether Origin Shield is enabled.
\nWhen it's enabled, CloudFront routes all requests through Origin Shield, which can help\n\t\t\tprotect your origin. When it's disabled, CloudFront might send requests directly to your\n\t\t\torigin from multiple edge locations or regional edge caches.
", + "smithy.api#documentation": "A flag that specifies whether Origin Shield is enabled.
\n\t\tWhen it’s enabled, CloudFront routes all requests through Origin Shield, which can help\n\t\t\tprotect your origin. When it’s disabled, CloudFront might send requests directly to your\n\t\t\torigin from multiple edge locations or regional edge caches.
", "smithy.api#required": {} } }, "OriginShieldRegion": { "target": "com.amazonaws.cloudfront#OriginShieldRegion", "traits": { - "smithy.api#documentation": "The Amazon Web Services Region for Origin Shield.
\nSpecify the Amazon Web Services Region that has the lowest latency to your origin. To specify a\n\t\t\tregion, use the region code, not the region name. For example, specify the US East\n\t\t\t(Ohio) region as us-east-2
.
When you enable CloudFront Origin Shield, you must specify the Amazon Web Services Region for Origin\n\t\t\tShield. For the list of Amazon Web Services Regions that you can specify, and for help choosing the\n\t\t\tbest Region for your origin, see Choosing the Amazon Web Services Region for Origin Shield in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" + "smithy.api#documentation": "The Amazon Web Services Region for Origin Shield.
\n\t\tSpecify the Amazon Web Services Region that has the lowest latency to your origin. To specify a\n\t\t\tregion, use the region code, not the region name. For example, specify the US East\n\t\t\t(Ohio) region as us-east-2
.
When you enable CloudFront Origin Shield, you must specify the Amazon Web Services Region for Origin\n\t\t\tShield. For the list of Amazon Web Services Regions that you can specify, and for help choosing the\n\t\t\tbest Region for your origin, see Choosing the Amazon Web Services Region for Origin Shield in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" } } }, "traits": { - "smithy.api#documentation": "CloudFront Origin Shield.
\nUsing Origin Shield can help reduce the load on your origin. For more information, see\n\t\t\t\tUsing Origin Shield in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" + "smithy.api#documentation": "CloudFront Origin Shield.
\n\t\tUsing Origin Shield can help reduce the load on your origin. For more information, see\n\t\t\t\tUsing Origin Shield in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" } }, "com.amazonaws.cloudfront#OriginShieldRegion": { @@ -12674,7 +12674,7 @@ "Quantity": { "target": "com.amazonaws.cloudfront#integer", "traits": { - "smithy.api#documentation": "The number of SSL/TLS protocols that you want to allow CloudFront to use when establishing\n\t\t\tan HTTPS connection with this origin.
", + "smithy.api#documentation": "The number of SSL/TLS protocols that you want to allow CloudFront to use when establishing\n\t\t\tan HTTPS connection with this origin.
", "smithy.api#required": {} } }, @@ -12687,7 +12687,7 @@ } }, "traits": { - "smithy.api#documentation": "A complex type that contains information about the SSL/TLS protocols that CloudFront can use\n\t\t\twhen establishing an HTTPS connection with your origin.
" + "smithy.api#documentation": "A complex type that contains information about the SSL/TLS protocols that CloudFront can use\n\t\t\twhen establishing an HTTPS connection with your origin.
" } }, "com.amazonaws.cloudfront#Origins": { @@ -12718,14 +12718,14 @@ "EnableAcceptEncodingGzip": { "target": "com.amazonaws.cloudfront#boolean", "traits": { - "smithy.api#documentation": "A flag that can affect whether the Accept-Encoding
HTTP header is\n\t\t\tincluded in the cache key and included in requests that CloudFront sends to the origin.
This field is related to the EnableAcceptEncodingBrotli
field. If one or\n\t\t\tboth of these fields is true
\n and the viewer request includes the Accept-Encoding
\n\t\t\theader, then CloudFront does the following:
Normalizes the value of the viewer's Accept-Encoding
\n\t\t\t\t\theader
Includes the normalized header in the cache key
\nIncludes the normalized header in the request to the origin, if a request is\n\t\t\t\t\tnecessary
\nFor more information, see Compression support in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\nIf you set this value to true
, and this cache behavior also has an origin\n\t\t\trequest policy attached, do not include the Accept-Encoding
header in the\n\t\t\torigin request policy. CloudFront always includes the Accept-Encoding
header in\n\t\t\torigin requests when the value of this field is true
, so including this\n\t\t\theader in an origin request policy has no effect.
If both of these fields are false
, then CloudFront treats the\n\t\t\t\tAccept-Encoding
header the same as any other HTTP header in the viewer\n\t\t\trequest. By default, it's not included in the cache key and it's not included in origin\n\t\t\trequests. In this case, you can manually add Accept-Encoding
to the headers\n\t\t\twhitelist like any other HTTP header.
A flag that can affect whether the Accept-Encoding
HTTP header is\n\t\t\tincluded in the cache key and included in requests that CloudFront sends to the origin.
This field is related to the EnableAcceptEncodingBrotli
field. If one or\n\t\t\tboth of these fields is true
\n\t\t\t and the viewer request includes the Accept-Encoding
\n\t\t\theader, then CloudFront does the following:
Normalizes the value of the viewer’s Accept-Encoding
\n\t\t\t\t\theader
Includes the normalized header in the cache key
\n\t\t\tIncludes the normalized header in the request to the origin, if a request is\n\t\t\t\t\tnecessary
\n\t\t\tFor more information, see Compression support in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\n\t\tIf you set this value to true
, and this cache behavior also has an origin\n\t\t\trequest policy attached, do not include the Accept-Encoding
header in the\n\t\t\torigin request policy. CloudFront always includes the Accept-Encoding
header in\n\t\t\torigin requests when the value of this field is true
, so including this\n\t\t\theader in an origin request policy has no effect.
If both of these fields are false
, then CloudFront treats the\n\t\t\t\tAccept-Encoding
header the same as any other HTTP header in the viewer\n\t\t\trequest. By default, it’s not included in the cache key and it’s not included in origin\n\t\t\trequests. In this case, you can manually add Accept-Encoding
to the headers\n\t\t\twhitelist like any other HTTP header.
A flag that can affect whether the Accept-Encoding
HTTP header is\n\t\t\tincluded in the cache key and included in requests that CloudFront sends to the origin.
This field is related to the EnableAcceptEncodingGzip
field. If one or\n\t\t\tboth of these fields is true
\n and the viewer request includes the Accept-Encoding
\n\t\t\theader, then CloudFront does the following:
Normalizes the value of the viewer's Accept-Encoding
\n\t\t\t\t\theader
Includes the normalized header in the cache key
\nIncludes the normalized header in the request to the origin, if a request is\n\t\t\t\t\tnecessary
\nFor more information, see Compression support in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\nIf you set this value to true
, and this cache behavior also has an origin\n\t\t\trequest policy attached, do not include the Accept-Encoding
header in the\n\t\t\torigin request policy. CloudFront always includes the Accept-Encoding
header in\n\t\t\torigin requests when the value of this field is true
, so including this\n\t\t\theader in an origin request policy has no effect.
If both of these fields are false
, then CloudFront treats the\n\t\t\t\tAccept-Encoding
header the same as any other HTTP header in the viewer\n\t\t\trequest. By default, it's not included in the cache key and it's not included in origin\n\t\t\trequests. In this case, you can manually add Accept-Encoding
to the headers\n\t\t\twhitelist like any other HTTP header.
A flag that can affect whether the Accept-Encoding
HTTP header is\n\t\t\tincluded in the cache key and included in requests that CloudFront sends to the origin.
This field is related to the EnableAcceptEncodingGzip
field. If one or\n\t\t\tboth of these fields is true
\n\t\t\t and the viewer request includes the Accept-Encoding
\n\t\t\theader, then CloudFront does the following:
Normalizes the value of the viewer’s Accept-Encoding
\n\t\t\t\t\theader
Includes the normalized header in the cache key
\n\t\t\tIncludes the normalized header in the request to the origin, if a request is\n\t\t\t\t\tnecessary
\n\t\t\tFor more information, see Compression support in the\n\t\t\t\tAmazon CloudFront Developer Guide.
\n\t\tIf you set this value to true
, and this cache behavior also has an origin\n\t\t\trequest policy attached, do not include the Accept-Encoding
header in the\n\t\t\torigin request policy. CloudFront always includes the Accept-Encoding
header in\n\t\t\torigin requests when the value of this field is true
, so including this\n\t\t\theader in an origin request policy has no effect.
If both of these fields are false
, then CloudFront treats the\n\t\t\t\tAccept-Encoding
header the same as any other HTTP header in the viewer\n\t\t\trequest. By default, it’s not included in the cache key and it’s not included in origin\n\t\t\trequests. In this case, you can manually add Accept-Encoding
to the headers\n\t\t\twhitelist like any other HTTP header.
This object determines the values that CloudFront includes in the cache key. These values\n\t\t\tcan include HTTP headers, cookies, and URL query strings. CloudFront uses the cache key to\n\t\t\tfind an object in its cache that it can return to the viewer.
\nThe headers, cookies, and query strings that are included in the cache key are\n\t\t\tautomatically included in requests that CloudFront sends to the origin. CloudFront sends a request\n\t\t\twhen it can't find an object in its cache that matches the request's cache key. If you\n\t\t\twant to send values to the origin but not include them in the cache\n\t\t\tkey, use OriginRequestPolicy
.
This object determines the values that CloudFront includes in the cache key. These values\n\t\t\tcan include HTTP headers, cookies, and URL query strings. CloudFront uses the cache key to\n\t\t\tfind an object in its cache that it can return to the viewer.
\n\t\tThe headers, cookies, and query strings that are included in the cache key are\n\t\t\tautomatically included in requests that CloudFront sends to the origin. CloudFront sends a request\n\t\t\twhen it can’t find an object in its cache that matches the request’s cache key. If you\n\t\t\twant to send values to the origin but not include them in the cache\n\t\t\tkey, use OriginRequestPolicy
.
A complex type that contains information about the objects that you want to\n\t\t\tinvalidate. For more information, see Specifying the Objects to Invalidate in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" + "smithy.api#documentation": "A complex type that contains information about the objects that you want to\n\t\t\tinvalidate. For more information, see Specifying the Objects to Invalidate in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" } }, "com.amazonaws.cloudfront#PreconditionFailed": { @@ -12868,7 +12868,7 @@ "CallerReference": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "A string included in the request to help make sure that the request can't be\n\t\t\treplayed.
", + "smithy.api#documentation": "A string included in the request to help make sure that the request can’t be\n\t\t\treplayed.
", "smithy.api#required": {} } }, @@ -12914,7 +12914,7 @@ } }, "traits": { - "smithy.api#documentation": "The specified public key is in use.
", + "smithy.api#documentation": "The specified public key is in use.
", "smithy.api#error": "client", "smithy.api#httpError": 409 } @@ -13030,7 +13030,7 @@ } ], "traits": { - "smithy.api#documentation": "Publishes a CloudFront function by copying the function code from the\n\t\t\t\tDEVELOPMENT
stage to LIVE
. This automatically updates all\n\t\t\tcache behaviors that are using this function to use the newly published copy in the\n\t\t\t\tLIVE
stage.
When a function is published to the LIVE
stage, you can attach the\n\t\t\tfunction to a distribution's cache behavior, using the function's Amazon Resource Name\n\t\t\t(ARN).
To publish a function, you must provide the function's name and version\n\t\t\t\t(ETag
value). To get these values, you can use\n\t\t\t\tListFunctions
and DescribeFunction
.
Publishes a CloudFront function by copying the function code from the\n\t\t\t\tDEVELOPMENT
stage to LIVE
. This automatically updates all\n\t\t\tcache behaviors that are using this function to use the newly published copy in the\n\t\t\t\tLIVE
stage.
When a function is published to the LIVE
stage, you can attach the\n\t\t\tfunction to a distribution’s cache behavior, using the function’s Amazon Resource Name\n\t\t\t(ARN).
To publish a function, you must provide the function’s name and version\n\t\t\t\t(ETag
value). To get these values, you can use\n\t\t\t\tListFunctions
and DescribeFunction
.
A list that contains the query string parameters that you want CloudFront to use as a basis\n\t\t\tfor caching for a cache behavior. If Quantity
is 0, you can omit\n\t\t\t\tItems
.
A list that contains the query string parameters that you want CloudFront to use as a basis\n\t\t\tfor caching for a cache behavior. If Quantity
is 0, you can omit\n\t\t\t\tItems
.
This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field.
\nIf you want to include query strings in the cache key, use\n\t\t\t\tQueryStringsConfig
in a cache policy. See\n\t\t\tCachePolicy
.
If you want to send query strings to the origin but not include them in the cache key,\n\t\t\tuse QueryStringsConfig
in an origin request policy. See\n\t\t\t\tOriginRequestPolicy
.
A complex type that contains information about the query string parameters that you\n\t\t\twant CloudFront to use for caching for a cache behavior.
" + "smithy.api#documentation": "This field is deprecated. We recommend that you use a cache policy or an origin\n\t\t\trequest policy instead of this field.
\n\t\tIf you want to include query strings in the cache key, use\n\t\t\t\tQueryStringsConfig
in a cache policy. See\n\t\t\tCachePolicy
.
If you want to send query strings to the origin but not include them in the cache key,\n\t\t\tuse QueryStringsConfig
in an origin request policy. See\n\t\t\t\tOriginRequestPolicy
.
A complex type that contains information about the query string parameters that you\n\t\t\twant CloudFront to use for caching for a cache behavior.
" } }, "com.amazonaws.cloudfront#QueryStringCacheKeysList": { @@ -13251,7 +13251,7 @@ "Fields": { "target": "com.amazonaws.cloudfront#FieldList", "traits": { - "smithy.api#documentation": "A list of fields that are included in each real-time log record. In an API response,\n\t\t\tthe fields are provided in the same order in which they are sent to the Amazon Kinesis data\n\t\t\tstream.
\nFor more information about fields, see Real-time log configuration fields in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", + "smithy.api#documentation": "A list of fields that are included in each real-time log record. In an API response,\n\t\t\tthe fields are provided in the same order in which they are sent to the Amazon Kinesis data\n\t\t\tstream.
\n\t\tFor more information about fields, see Real-time log configuration fields in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", "smithy.api#required": {} } } @@ -13470,13 +13470,13 @@ "ResponseHeadersPolicyConfig": { "target": "com.amazonaws.cloudfront#ResponseHeadersPolicyConfig", "traits": { - "smithy.api#documentation": "A response headers policy configuration.
\nA response headers policy contains information about a set of HTTP response headers\n\t\t\tand their values. CloudFront adds the headers in the policy to HTTP responses that it sends\n\t\t\tfor requests that match a cache behavior that's associated with the policy.
", + "smithy.api#documentation": "A response headers policy configuration.
\n\t\tA response headers policy contains information about a set of HTTP response headers\n\t\t\tand their values. CloudFront adds the headers in the policy to HTTP responses that it sends\n\t\t\tfor requests that match a cache behavior that’s associated with the policy.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "A response headers policy.
\nA response headers policy contains information about a set of HTTP response headers\n\t\t\tand their values.
\nAfter you create a response headers policy, you can use its ID to attach it to one or\n\t\t\tmore cache behaviors in a CloudFront distribution. When it's attached to a cache behavior,\n\t\t\tCloudFront adds the headers in the policy to HTTP responses that it sends for requests that\n\t\t\tmatch the cache behavior.
\nFor more information, see Adding HTTP headers to CloudFront responses in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" + "smithy.api#documentation": "A response headers policy.
\n\t\tA response headers policy contains information about a set of HTTP response headers\n\t\t\tand their values.
\n\t\tAfter you create a response headers policy, you can use its ID to attach it to one or\n\t\t\tmore cache behaviors in a CloudFront distribution. When it’s attached to a cache behavior,\n\t\t\tCloudFront adds the headers in the policy to HTTP responses that it sends for requests that\n\t\t\tmatch the cache behavior.
\n\t\tFor more information, see Adding HTTP headers to CloudFront responses in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" } }, "com.amazonaws.cloudfront#ResponseHeadersPolicyAccessControlAllowHeaders": { @@ -13498,7 +13498,7 @@ } }, "traits": { - "smithy.api#documentation": "A list of HTTP header names that CloudFront includes as values for the\n\t\t\t\tAccess-Control-Allow-Headers
HTTP response header.
For more information about the Access-Control-Allow-Headers
HTTP response\n\t\t\theader, see Access-Control-Allow-Headers in the MDN Web Docs.
A list of HTTP header names that CloudFront includes as values for the\n\t\t\t\tAccess-Control-Allow-Headers
HTTP response header.
For more information about the Access-Control-Allow-Headers
HTTP response\n\t\t\theader, see Access-Control-Allow-Headers in the MDN Web Docs.
The list of HTTP methods. Valid values are:
\n\n GET
\n
\n DELETE
\n
\n HEAD
\n
\n OPTIONS
\n
\n PATCH
\n
\n POST
\n
\n PUT
\n
\n ALL
\n
\n ALL
is a special value that includes all of the listed HTTP\n\t\t\tmethods.
The list of HTTP methods. Valid values are:
\n\t\t\n GET
\n
\n DELETE
\n
\n HEAD
\n
\n OPTIONS
\n
\n PATCH
\n
\n POST
\n
\n PUT
\n
\n ALL
\n
\n ALL
is a special value that includes all of the listed HTTP\n\t\t\tmethods.
A list of HTTP methods that CloudFront includes as values for the\n\t\t\t\tAccess-Control-Allow-Methods
HTTP response header.
For more information about the Access-Control-Allow-Methods
HTTP response\n\t\t\theader, see Access-Control-Allow-Methods in the MDN Web Docs.
A list of HTTP methods that CloudFront includes as values for the\n\t\t\t\tAccess-Control-Allow-Methods
HTTP response header.
For more information about the Access-Control-Allow-Methods
HTTP response\n\t\t\theader, see Access-Control-Allow-Methods in the MDN Web Docs.
A list of origins (domain names) that CloudFront can use as the value for the\n\t\t\t\tAccess-Control-Allow-Origin
HTTP response header.
For more information about the Access-Control-Allow-Origin
HTTP response\n\t\t\theader, see Access-Control-Allow-Origin in the MDN Web Docs.
A list of origins (domain names) that CloudFront can use as the value for the\n\t\t\t\tAccess-Control-Allow-Origin
HTTP response header.
For more information about the Access-Control-Allow-Origin
HTTP response\n\t\t\theader, see Access-Control-Allow-Origin in the MDN Web Docs.
A list of HTTP headers that CloudFront includes as values for the\n\t\t\t\tAccess-Control-Expose-Headers
HTTP response header.
For more information about the Access-Control-Expose-Headers
HTTP\n\t\t\tresponse header, see Access-Control-Expose-Headers in the MDN Web Docs.
A list of HTTP headers that CloudFront includes as values for the\n\t\t\t\tAccess-Control-Expose-Headers
HTTP response header.
For more information about the Access-Control-Expose-Headers
HTTP\n\t\t\tresponse header, see Access-Control-Expose-Headers in the MDN Web Docs.
A comment to describe the response headers policy.
\nThe comment cannot be longer than 128 characters.
" + "smithy.api#documentation": "A comment to describe the response headers policy.
\n\t\tThe comment cannot be longer than 128 characters.
" } }, "Name": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "A name to identify the response headers policy.
\nThe name must be unique for response headers policies in this Amazon Web Services account.
", + "smithy.api#documentation": "A name to identify the response headers policy.
\n\t\tThe name must be unique for response headers policies in this Amazon Web Services account.
", "smithy.api#required": {} } }, @@ -13674,7 +13674,7 @@ } }, "traits": { - "smithy.api#documentation": "A response headers policy configuration.
\nA response headers policy configuration contains metadata about the response headers\n\t\t\tpolicy, and configurations for sets of HTTP response headers and their values. CloudFront adds\n\t\t\tthe headers in the policy to HTTP responses that it sends for requests that match a\n\t\t\tcache behavior associated with the policy.
" + "smithy.api#documentation": "A response headers policy configuration.
\n\t\tA response headers policy configuration contains metadata about the response headers\n\t\t\tpolicy, and configurations for sets of HTTP response headers and their values. CloudFront adds\n\t\t\tthe headers in the policy to HTTP responses that it sends for requests that match a\n\t\t\tcache behavior associated with the policy.
" } }, "com.amazonaws.cloudfront#ResponseHeadersPolicyContentSecurityPolicy": { @@ -13696,7 +13696,7 @@ } }, "traits": { - "smithy.api#documentation": "The policy directives and their values that CloudFront includes as values for the\n\t\t\t\tContent-Security-Policy
HTTP response header.
For more information about the Content-Security-Policy
HTTP response\n\t\t\theader, see Content-Security-Policy in the MDN Web Docs.
The policy directives and their values that CloudFront includes as values for the\n\t\t\t\tContent-Security-Policy
HTTP response header.
For more information about the Content-Security-Policy
HTTP response\n\t\t\theader, see Content-Security-Policy in the MDN Web Docs.
Determines whether CloudFront includes the X-Content-Type-Options
HTTP response\n\t\t\theader with its value set to nosniff
.
For more information about the X-Content-Type-Options
HTTP response\n\t\t\theader, see X-Content-Type-Options in the MDN Web Docs.
Determines whether CloudFront includes the X-Content-Type-Options
HTTP response\n\t\t\theader with its value set to nosniff
.
For more information about the X-Content-Type-Options
HTTP response\n\t\t\theader, see X-Content-Type-Options in the MDN Web Docs.
A list of origins (domain names) that CloudFront can use as the value for the\n\t\t\t\tAccess-Control-Allow-Origin
HTTP response header.
For more information about the Access-Control-Allow-Origin
HTTP response\n\t\t\theader, see Access-Control-Allow-Origin in the MDN Web Docs.
A list of origins (domain names) that CloudFront can use as the value for the\n\t\t\t\tAccess-Control-Allow-Origin
HTTP response header.
For more information about the Access-Control-Allow-Origin
HTTP response\n\t\t\theader, see Access-Control-Allow-Origin in the MDN Web Docs.
A list of HTTP header names that CloudFront includes as values for the\n\t\t\t\tAccess-Control-Allow-Headers
HTTP response header.
For more information about the Access-Control-Allow-Headers
HTTP response\n\t\t\theader, see Access-Control-Allow-Headers in the MDN Web Docs.
A list of HTTP header names that CloudFront includes as values for the\n\t\t\t\tAccess-Control-Allow-Headers
HTTP response header.
For more information about the Access-Control-Allow-Headers
HTTP response\n\t\t\theader, see Access-Control-Allow-Headers in the MDN Web Docs.
A list of HTTP methods that CloudFront includes as values for the\n\t\t\t\tAccess-Control-Allow-Methods
HTTP response header.
For more information about the Access-Control-Allow-Methods
HTTP response\n\t\t\theader, see Access-Control-Allow-Methods in the MDN Web Docs.
A list of HTTP methods that CloudFront includes as values for the\n\t\t\t\tAccess-Control-Allow-Methods
HTTP response header.
For more information about the Access-Control-Allow-Methods
HTTP response\n\t\t\theader, see Access-Control-Allow-Methods in the MDN Web Docs.
A Boolean that CloudFront uses as the value for the\n\t\t\t\tAccess-Control-Allow-Credentials
HTTP response header.
For more information about the Access-Control-Allow-Credentials
HTTP\n\t\t\tresponse header, see Access-Control-Allow-Credentials in the MDN Web Docs.
A Boolean that CloudFront uses as the value for the\n\t\t\t\tAccess-Control-Allow-Credentials
HTTP response header.
For more information about the Access-Control-Allow-Credentials
HTTP\n\t\t\tresponse header, see Access-Control-Allow-Credentials in the MDN Web Docs.
A list of HTTP headers that CloudFront includes as values for the\n\t\t\t\tAccess-Control-Expose-Headers
HTTP response header.
For more information about the Access-Control-Expose-Headers
HTTP\n\t\t\tresponse header, see Access-Control-Expose-Headers in the MDN Web Docs.
A list of HTTP headers that CloudFront includes as values for the\n\t\t\t\tAccess-Control-Expose-Headers
HTTP response header.
For more information about the Access-Control-Expose-Headers
HTTP\n\t\t\tresponse header, see Access-Control-Expose-Headers in the MDN Web Docs.
A number that CloudFront uses as the value for the Access-Control-Max-Age
HTTP\n\t\t\tresponse header.
For more information about the Access-Control-Max-Age
HTTP response\n\t\t\theader, see Access-Control-Max-Age in the MDN Web Docs.
A number that CloudFront uses as the value for the Access-Control-Max-Age
HTTP\n\t\t\tresponse header.
For more information about the Access-Control-Max-Age
HTTP response\n\t\t\theader, see Access-Control-Max-Age in the MDN Web Docs.
A configuration for a set of HTTP response headers that are used for cross-origin\n\t\t\tresource sharing (CORS). CloudFront adds these headers to HTTP responses that it sends for\n\t\t\tCORS requests that match a cache behavior associated with this response headers\n\t\t\tpolicy.
\nFor more information about CORS, see Cross-Origin Resource\n\t\t\t\tSharing (CORS) in the MDN Web Docs.
" + "smithy.api#documentation": "A configuration for a set of HTTP response headers that are used for cross-origin\n\t\t\tresource sharing (CORS). CloudFront adds these headers to HTTP responses that it sends for\n\t\t\tCORS requests that match a cache behavior associated with this response headers\n\t\t\tpolicy.
\n\t\tFor more information about CORS, see Cross-Origin Resource\n\t\t\t\tSharing (CORS) in the MDN Web Docs.
" } }, "com.amazonaws.cloudfront#ResponseHeadersPolicyCustomHeader": { @@ -13795,7 +13795,7 @@ } }, "traits": { - "smithy.api#documentation": "An HTTP response header name and its value. CloudFront includes this header in HTTP\n\t\t\tresponses that it sends for requests that match a cache behavior that's associated with\n\t\t\tthis response headers policy.
" + "smithy.api#documentation": "An HTTP response header name and its value. CloudFront includes this header in HTTP\n\t\t\tresponses that it sends for requests that match a cache behavior that’s associated with\n\t\t\tthis response headers policy.
" } }, "com.amazonaws.cloudfront#ResponseHeadersPolicyCustomHeaderList": { @@ -13825,7 +13825,7 @@ } }, "traits": { - "smithy.api#documentation": "A list of HTTP response header names and their values. CloudFront includes these headers in\n\t\t\tHTTP responses that it sends for requests that match a cache behavior that's associated\n\t\t\twith this response headers policy.
" + "smithy.api#documentation": "A list of HTTP response header names and their values. CloudFront includes these headers in\n\t\t\tHTTP responses that it sends for requests that match a cache behavior that’s associated\n\t\t\twith this response headers policy.
" } }, "com.amazonaws.cloudfront#ResponseHeadersPolicyFrameOptions": { @@ -13841,13 +13841,13 @@ "FrameOption": { "target": "com.amazonaws.cloudfront#FrameOptionsList", "traits": { - "smithy.api#documentation": "The value of the X-Frame-Options
HTTP response header. Valid values are\n\t\t\t\tDENY
and SAMEORIGIN
.
For more information about these values, see X-Frame-Options in the MDN Web Docs.
", + "smithy.api#documentation": "The value of the X-Frame-Options
HTTP response header. Valid values are\n\t\t\t\tDENY
and SAMEORIGIN
.
For more information about these values, see X-Frame-Options in the MDN Web Docs.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "Determines whether CloudFront includes the X-Frame-Options
HTTP response header\n\t\t\tand the header's value.
For more information about the X-Frame-Options
HTTP response header, see\n\t\t\t\tX-Frame-Options in the MDN Web Docs.
Determines whether CloudFront includes the X-Frame-Options
HTTP response header\n\t\t\tand the header’s value.
For more information about the X-Frame-Options
HTTP response header, see\n\t\t\t\tX-Frame-Options in the MDN Web Docs.
Cannot delete the response headers policy because it is attached to one or more cache\n\t\t\tbehaviors in a CloudFront distribution.
", + "smithy.api#documentation": "Cannot delete the response headers policy because it is attached to one or more cache\n\t\t\tbehaviors in a CloudFront distribution.
", "smithy.api#error": "client", "smithy.api#httpError": 409 } @@ -13910,13 +13910,13 @@ "ReferrerPolicy": { "target": "com.amazonaws.cloudfront#ReferrerPolicyList", "traits": { - "smithy.api#documentation": "The value of the Referrer-Policy
HTTP response header. Valid values\n\t\t\tare:
\n no-referrer
\n
\n no-referrer-when-downgrade
\n
\n origin
\n
\n origin-when-cross-origin
\n
\n same-origin
\n
\n strict-origin
\n
\n strict-origin-when-cross-origin
\n
\n unsafe-url
\n
For more information about these values, see Referrer-Policy in the MDN Web Docs.
", + "smithy.api#documentation": "The value of the Referrer-Policy
HTTP response header. Valid values\n\t\t\tare:
\n no-referrer
\n
\n no-referrer-when-downgrade
\n
\n origin
\n
\n origin-when-cross-origin
\n
\n same-origin
\n
\n strict-origin
\n
\n strict-origin-when-cross-origin
\n
\n unsafe-url
\n
For more information about these values, see Referrer-Policy in the MDN Web Docs.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "Determines whether CloudFront includes the Referrer-Policy
HTTP response header\n\t\t\tand the header's value.
For more information about the Referrer-Policy
HTTP response header, see\n\t\t\t\tReferrer-Policy in the MDN Web Docs.
Determines whether CloudFront includes the Referrer-Policy
HTTP response header\n\t\t\tand the header’s value.
For more information about the Referrer-Policy
HTTP response header, see\n\t\t\t\tReferrer-Policy in the MDN Web Docs.
Determines whether CloudFront includes the X-XSS-Protection
HTTP response\n\t\t\theader and the header's value.
For more information about the X-XSS-Protection
HTTP response header, see\n\t\t\t\tX-XSS-Protection in the MDN Web Docs.
Determines whether CloudFront includes the X-XSS-Protection
HTTP response\n\t\t\theader and the header’s value.
For more information about the X-XSS-Protection
HTTP response header, see\n\t\t\t\tX-XSS-Protection in the MDN Web Docs.
Determines whether CloudFront includes the X-Frame-Options
HTTP response header\n\t\t\tand the header's value.
For more information about the X-Frame-Options
HTTP response header, see\n\t\t\t\tX-Frame-Options in the MDN Web Docs.
Determines whether CloudFront includes the X-Frame-Options
HTTP response header\n\t\t\tand the header’s value.
For more information about the X-Frame-Options
HTTP response header, see\n\t\t\t\tX-Frame-Options in the MDN Web Docs.
Determines whether CloudFront includes the Referrer-Policy
HTTP response header\n\t\t\tand the header's value.
For more information about the Referrer-Policy
HTTP response header, see\n\t\t\t\tReferrer-Policy in the MDN Web Docs.
Determines whether CloudFront includes the Referrer-Policy
HTTP response header\n\t\t\tand the header’s value.
For more information about the Referrer-Policy
HTTP response header, see\n\t\t\t\tReferrer-Policy in the MDN Web Docs.
The policy directives and their values that CloudFront includes as values for the\n\t\t\t\tContent-Security-Policy
HTTP response header.
For more information about the Content-Security-Policy
HTTP response\n\t\t\theader, see Content-Security-Policy in the MDN Web Docs.
The policy directives and their values that CloudFront includes as values for the\n\t\t\t\tContent-Security-Policy
HTTP response header.
For more information about the Content-Security-Policy
HTTP response\n\t\t\theader, see Content-Security-Policy in the MDN Web Docs.
Determines whether CloudFront includes the X-Content-Type-Options
HTTP response\n\t\t\theader with its value set to nosniff
.
For more information about the X-Content-Type-Options
HTTP response\n\t\t\theader, see X-Content-Type-Options in the MDN Web Docs.
Determines whether CloudFront includes the X-Content-Type-Options
HTTP response\n\t\t\theader with its value set to nosniff
.
For more information about the X-Content-Type-Options
HTTP response\n\t\t\theader, see X-Content-Type-Options in the MDN Web Docs.
Determines whether CloudFront includes the Strict-Transport-Security
HTTP\n\t\t\tresponse header and the header's value.
For more information about the Strict-Transport-Security
HTTP response\n\t\t\theader, see Strict-Transport-Security in the MDN Web Docs.
Determines whether CloudFront includes the Strict-Transport-Security
HTTP\n\t\t\tresponse header and the header’s value.
For more information about the Strict-Transport-Security
HTTP response\n\t\t\theader, see Strict-Transport-Security in the MDN Web Docs.
A configuration for enabling the Server-Timing
header in HTTP responses\n\t\t\tsent from CloudFront. CloudFront adds this header to HTTP responses that it sends in response to\n\t\t\trequests that match a cache behavior that's associated with this response headers\n\t\t\tpolicy.
You can use the Server-Timing
header to view metrics that can help you\n\t\t\tgain insights about the behavior and performance of CloudFront. For example, you can see which\n\t\t\tcache layer served a cache hit, or the first byte latency from the origin when there was\n\t\t\ta cache miss. You can use the metrics in the Server-Timing
header to\n\t\t\ttroubleshoot issues or test the efficiency of your CloudFront configuration. For more\n\t\t\tinformation, see Server-Timing header in the\n\t\t\t\tAmazon CloudFront Developer Guide.
A configuration for enabling the Server-Timing
header in HTTP responses\n\t\t\tsent from CloudFront. CloudFront adds this header to HTTP responses that it sends in response to\n\t\t\trequests that match a cache behavior that's associated with this response headers\n\t\t\tpolicy.
You can use the Server-Timing
header to view metrics that can help you\n\t\t\tgain insights about the behavior and performance of CloudFront. For example, you can see which\n\t\t\tcache layer served a cache hit, or the first byte latency from the origin when there was\n\t\t\ta cache miss. You can use the metrics in the Server-Timing
header to\n\t\t\ttroubleshoot issues or test the efficiency of your CloudFront configuration. For more\n\t\t\tinformation, see Server-Timing header in the\n\t\t\t\tAmazon CloudFront Developer Guide.
Determines whether CloudFront includes the Strict-Transport-Security
HTTP\n\t\t\tresponse header and the header's value.
For more information about the Strict-Transport-Security
HTTP response\n\t\t\theader, see Strict-Transport-Security in the MDN Web Docs.
Determines whether CloudFront includes the Strict-Transport-Security
HTTP\n\t\t\tresponse header and the header’s value.
For more information about the Strict-Transport-Security
HTTP response\n\t\t\theader, see Strict-Transport-Security in the MDN Web Docs.
A Boolean that determines the value of the X-XSS-Protection
HTTP response\n\t\t\theader. When this setting is true
, the value of the\n\t\t\t\tX-XSS-Protection
header is 1
. When this setting is\n\t\t\t\tfalse
, the value of the X-XSS-Protection
header is\n\t\t\t\t0
.
For more information about these settings, see X-XSS-Protection in the MDN Web Docs.
", + "smithy.api#documentation": "A Boolean that determines the value of the X-XSS-Protection
HTTP response\n\t\t\theader. When this setting is true
, the value of the\n\t\t\t\tX-XSS-Protection
header is 1
. When this setting is\n\t\t\t\tfalse
, the value of the X-XSS-Protection
header is\n\t\t\t\t0
.
For more information about these settings, see X-XSS-Protection in the MDN Web Docs.
", "smithy.api#required": {} } }, "ModeBlock": { "target": "com.amazonaws.cloudfront#boolean", "traits": { - "smithy.api#documentation": "A Boolean that determines whether CloudFront includes the mode=block
directive\n\t\t\tin the X-XSS-Protection
header.
For more information about this directive, see X-XSS-Protection in the MDN Web Docs.
" + "smithy.api#documentation": "A Boolean that determines whether CloudFront includes the mode=block
directive\n\t\t\tin the X-XSS-Protection
header.
For more information about this directive, see X-XSS-Protection in the MDN Web Docs.
" } }, "ReportUri": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "A reporting URI, which CloudFront uses as the value of the report
directive in\n\t\t\tthe X-XSS-Protection
header.
You cannot specify a ReportUri
when ModeBlock
is\n\t\t\t\ttrue
.
For more information about using a reporting URL, see X-XSS-Protection in the MDN Web Docs.
" + "smithy.api#documentation": "A reporting URI, which CloudFront uses as the value of the report
directive in\n\t\t\tthe X-XSS-Protection
header.
You cannot specify a ReportUri
when ModeBlock
is\n\t\t\t\ttrue
.
For more information about using a reporting URL, see X-XSS-Protection in the MDN Web Docs.
" } } }, "traits": { - "smithy.api#documentation": "Determines whether CloudFront includes the X-XSS-Protection
HTTP response\n\t\t\theader and the header's value.
For more information about the X-XSS-Protection
HTTP response header, see\n\t\t\t\tX-XSS-Protection in the MDN Web Docs.
Determines whether CloudFront includes the X-XSS-Protection
HTTP response\n\t\t\theader and the header’s value.
For more information about the X-XSS-Protection
HTTP response header, see\n\t\t\t\tX-XSS-Protection in the MDN Web Docs.
The DNS name of the Amazon S3 origin.
", + "smithy.api#documentation": "The DNS name of the Amazon S3 origin.
", "smithy.api#required": {} } }, "OriginAccessIdentity": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The CloudFront origin access identity to associate with the distribution. Use an origin\n\t\t\taccess identity to configure the distribution so that end users can only access objects\n\t\t\tin an Amazon S3 bucket through CloudFront.
\nIf you want end users to be able to access objects using either the CloudFront URL or the\n\t\t\tAmazon S3 URL, specify an empty OriginAccessIdentity
element.
To delete the origin access identity from an existing distribution, update the\n\t\t\tdistribution configuration and include an empty OriginAccessIdentity
\n\t\t\telement.
To replace the origin access identity, update the distribution configuration and\n\t\t\tspecify the new origin access identity.
\nFor more information, see Using an Origin Access Identity to Restrict Access to Your Amazon S3 Content in\n\t\t\tthe Amazon CloudFront Developer Guide.
", + "smithy.api#documentation": "The CloudFront origin access identity to associate with the distribution. Use an origin\n\t\t\taccess identity to configure the distribution so that end users can only access objects\n\t\t\tin an Amazon S3 bucket through CloudFront.
\n\t\tIf you want end users to be able to access objects using either the CloudFront URL or the\n\t\t\tAmazon S3 URL, specify an empty OriginAccessIdentity
element.
To delete the origin access identity from an existing distribution, update the\n\t\t\tdistribution configuration and include an empty OriginAccessIdentity
\n\t\t\telement.
To replace the origin access identity, update the distribution configuration and\n\t\t\tspecify the new origin access identity.
\n\t\tFor more information, see Using an Origin Access Identity to Restrict Access to Your Amazon S3 Content in\n\t\t\tthe Amazon CloudFront Developer Guide.
", "smithy.api#required": {} } } @@ -14143,7 +14143,7 @@ "OriginAccessIdentity": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The CloudFront origin access identity to associate with the origin. Use an origin access\n\t\t\tidentity to configure the origin so that viewers can only access\n\t\t\tobjects in an Amazon S3 bucket through CloudFront. The format of the value is:
\norigin-access-identity/cloudfront/ID-of-origin-access-identity\n
\nwhere \n ID-of-origin-access-identity\n
is the value that\n\t\t\tCloudFront returned in the ID
element when you created the origin access\n\t\t\tidentity.
If you want viewers to be able to access objects using either the CloudFront URL or the Amazon S3\n\t\t\tURL, specify an empty OriginAccessIdentity
element.
To delete the origin access identity from an existing distribution, update the\n\t\t\tdistribution configuration and include an empty OriginAccessIdentity
\n\t\t\telement.
To replace the origin access identity, update the distribution configuration and\n\t\t\tspecify the new origin access identity.
\nFor more information about the origin access identity, see Serving Private\n\t\t\t\tContent through CloudFront in the Amazon CloudFront Developer Guide.
", + "smithy.api#documentation": "The CloudFront origin access identity to associate with the origin. Use an origin access\n\t\t\tidentity to configure the origin so that viewers can only access\n\t\t\tobjects in an Amazon S3 bucket through CloudFront. The format of the value is:
\n\t\torigin-access-identity/cloudfront/ID-of-origin-access-identity\n
\n\t\twhere \n ID-of-origin-access-identity\n
is the value that\n\t\t\tCloudFront returned in the ID
element when you created the origin access\n\t\t\tidentity.
If you want viewers to be able to access objects using either the CloudFront URL or the Amazon S3\n\t\t\tURL, specify an empty OriginAccessIdentity
element.
To delete the origin access identity from an existing distribution, update the\n\t\t\tdistribution configuration and include an empty OriginAccessIdentity
\n\t\t\telement.
To replace the origin access identity, update the distribution configuration and\n\t\t\tspecify the new origin access identity.
\n\t\tFor more information about the origin access identity, see Serving Private\n\t\t\t\tContent through CloudFront in the Amazon CloudFront Developer Guide.
", "smithy.api#required": {} } } @@ -14190,14 +14190,14 @@ "IdleTTL": { "target": "com.amazonaws.cloudfront#integer", "traits": { - "smithy.api#documentation": "The amount of time after which you want sessions to cease if no requests are\n\t\t\treceived. Allowed values are 300–3600 seconds (5–60 minutes).
\nThe value must be less than or equal to MaximumTTL
.
The amount of time after which you want sessions to cease if no requests are\n\t\t\treceived.\u2028Allowed values are 300–3600 seconds (5–60 minutes).
\n\t\tThe value must be less than or equal to MaximumTTL
.
The maximum amount of time to consider requests from the viewer as being part of the\n\t\t\tsame session. Allowed values are 300–3600 seconds (5–60 minutes).
\nThe value must be less than or equal to IdleTTL
.
The maximum amount of time to consider requests from the viewer as being part of the\n\t\t\tsame session. Allowed values are 300–3600 seconds (5–60 minutes).
\n\t\tThe value must be less than or equal to IdleTTL
.
The date and time that the distribution was last modified.
" + "smithy.api#documentation": "The date and time that the distribution was last modified.
" } }, "DomainName": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The domain name that corresponds to the streaming distribution, for example,\n\t\t\t\ts5c39gqb8ow64r.cloudfront.net
.
The domain name that corresponds to the streaming distribution, for example,\n\t\t\t\ts5c39gqb8ow64r.cloudfront.net
.
A complex type that lists the Amazon Web Services accounts, if any, that you included in the\n\t\t\t\tTrustedSigners
complex type for this distribution. These are the\n\t\t\taccounts that you want to allow to create signed URLs for private content.
The Signer
complex type lists the Amazon Web Services account number of the trusted\n\t\t\tsigner or self
if the signer is the Amazon Web Services account that created the\n\t\t\tdistribution. The Signer
element also includes the IDs of any active CloudFront\n\t\t\tkey pairs that are associated with the trusted signer's Amazon Web Services account. If no\n\t\t\t\tKeyPairId
element appears for a Signer
, that signer can't\n\t\t\tcreate signed URLs.
For more information, see Serving Private\n\t\t\t\tContent through CloudFront in the Amazon CloudFront Developer Guide.
", + "smithy.api#documentation": "A complex type that lists the Amazon Web Services accounts, if any, that you included in the\n\t\t\t\tTrustedSigners
complex type for this distribution. These are the\n\t\t\taccounts that you want to allow to create signed URLs for private content.
The Signer
complex type lists the Amazon Web Services account number of the trusted\n\t\t\tsigner or self
if the signer is the Amazon Web Services account that created the\n\t\t\tdistribution. The Signer
element also includes the IDs of any active CloudFront\n\t\t\tkey pairs that are associated with the trusted signer's Amazon Web Services account. If no\n\t\t\t\tKeyPairId
element appears for a Signer
, that signer can't\n\t\t\tcreate signed URLs.
For more information, see Serving Private\n\t\t\t\tContent through CloudFront in the Amazon CloudFront Developer Guide.
", "smithy.api#required": {} } }, @@ -14428,47 +14428,47 @@ "CallerReference": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "A unique value (for example, a date-time stamp) that ensures that the request can't be\n\t\t\treplayed.
\nIf the value of CallerReference
is new (regardless of the content of the\n\t\t\t\tStreamingDistributionConfig
object), CloudFront creates a new\n\t\t\tdistribution.
If CallerReference
is a value that you already sent in a previous request\n\t\t\tto create a distribution, CloudFront returns a DistributionAlreadyExists
\n\t\t\terror.
A unique value (for example, a date-time stamp) that ensures that the request can't be\n\t\t\treplayed.
\n\t\tIf the value of CallerReference
is new (regardless of the content of the\n\t\t\t\tStreamingDistributionConfig
object), CloudFront creates a new\n\t\t\tdistribution.
If CallerReference
is a value that you already sent in a previous request\n\t\t\tto create a distribution, CloudFront returns a DistributionAlreadyExists
\n\t\t\terror.
A complex type that contains information about the Amazon S3 bucket from which you want\n\t\t\tCloudFront to get your media files for distribution.
", + "smithy.api#documentation": "A complex type that contains information about the Amazon S3 bucket from which you want\n\t\t\tCloudFront to get your media files for distribution.
", "smithy.api#required": {} } }, "Aliases": { "target": "com.amazonaws.cloudfront#Aliases", "traits": { - "smithy.api#documentation": "A complex type that contains information about CNAMEs (alternate domain names), if\n\t\t\tany, for this streaming distribution.
" + "smithy.api#documentation": "A complex type that contains information about CNAMEs (alternate domain names), if\n\t\t\tany, for this streaming distribution.
" } }, "Comment": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "Any comments you want to include about the streaming distribution.
", + "smithy.api#documentation": "Any comments you want to include about the streaming distribution.
", "smithy.api#required": {} } }, "Logging": { "target": "com.amazonaws.cloudfront#StreamingLoggingConfig", "traits": { - "smithy.api#documentation": "A complex type that controls whether access logs are written for the streaming\n\t\t\tdistribution.
" + "smithy.api#documentation": "A complex type that controls whether access logs are written for the streaming\n\t\t\tdistribution.
" } }, "TrustedSigners": { "target": "com.amazonaws.cloudfront#TrustedSigners", "traits": { - "smithy.api#documentation": "A complex type that specifies any Amazon Web Services accounts that you want to permit to create\n\t\t\tsigned URLs for private content. If you want the distribution to use signed URLs,\n\t\t\tinclude this element; if you want the distribution to use public URLs, remove this\n\t\t\telement. For more information, see Serving Private\n\t\t\t\tContent through CloudFront in the Amazon CloudFront Developer Guide.
", + "smithy.api#documentation": "A complex type that specifies any Amazon Web Services accounts that you want to permit to create\n\t\t\tsigned URLs for private content. If you want the distribution to use signed URLs,\n\t\t\tinclude this element; if you want the distribution to use public URLs, remove this\n\t\t\telement. For more information, see Serving Private\n\t\t\t\tContent through CloudFront in the Amazon CloudFront Developer Guide.
", "smithy.api#required": {} } }, "PriceClass": { "target": "com.amazonaws.cloudfront#PriceClass", "traits": { - "smithy.api#documentation": "A complex type that contains information about price class for this streaming\n\t\t\tdistribution.
" + "smithy.api#documentation": "A complex type that contains information about price class for this streaming\n\t\t\tdistribution.
" } }, "Enabled": { @@ -14511,20 +14511,20 @@ "Marker": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The value you provided for the Marker
request parameter.
The value you provided for the Marker
request parameter.
If IsTruncated
is true
, this element is present and contains\n\t\t\tthe value you can use for the Marker
request parameter to continue listing\n\t\t\tyour RTMP distributions where they left off.
If IsTruncated
is true
, this element is present and contains\n\t\t\tthe value you can use for the Marker
request parameter to continue listing\n\t\t\tyour RTMP distributions where they left off.
The value you provided for the MaxItems
request parameter.
The value you provided for the MaxItems
request parameter.
A streaming distribution list.
" + "smithy.api#documentation": "A streaming distribution list.
" } }, "com.amazonaws.cloudfront#StreamingDistributionNotDisabled": { @@ -14579,14 +14579,14 @@ "ARN": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The ARN (Amazon Resource Name) for the streaming distribution. For example:\n\t\t\t\tarn:aws:cloudfront::123456789012:streaming-distribution/EDFDVBD632BHDS5
,\n\t\t\twhere 123456789012
is your Amazon Web Services account ID.
The ARN (Amazon Resource Name) for the streaming distribution. For example:\n\t\t\t\tarn:aws:cloudfront::123456789012:streaming-distribution/EDFDVBD632BHDS5
,\n\t\t\twhere 123456789012
is your Amazon Web Services account ID.
Indicates the current status of the distribution. When the status is\n\t\t\t\tDeployed
, the distribution's information is fully propagated throughout\n\t\t\tthe Amazon CloudFront system.
Indicates the current status of the distribution. When the status is\n\t\t\t\tDeployed
, the distribution's information is fully propagated throughout\n\t\t\tthe Amazon CloudFront system.
A complex type that specifies the Amazon Web Services accounts, if any, that you want to allow to\n\t\t\tcreate signed URLs for private content. If you want to require signed URLs in requests\n\t\t\tfor objects in the target origin that match the PathPattern
for this cache\n\t\t\tbehavior, specify true
for Enabled
, and specify the applicable\n\t\t\tvalues for Quantity
and Items
.If you don't want to require\n\t\t\tsigned URLs in requests for objects that match PathPattern
, specify\n\t\t\t\tfalse
for Enabled
and 0
for\n\t\t\t\tQuantity
. Omit Items
. To add, change, or remove one or\n\t\t\tmore trusted signers, change Enabled
to true
(if it's\n\t\t\tcurrently false
), change Quantity
as applicable, and specify\n\t\t\tall of the trusted signers that you want to include in the updated distribution.
For more information, see Serving Private\n\t\t\t\tContent through CloudFront in the Amazon CloudFront Developer Guide.
", + "smithy.api#documentation": "A complex type that specifies the Amazon Web Services accounts, if any, that you want to allow to\n\t\t\tcreate signed URLs for private content. If you want to require signed URLs in requests\n\t\t\tfor objects in the target origin that match the PathPattern
for this cache\n\t\t\tbehavior, specify true
for Enabled
, and specify the applicable\n\t\t\tvalues for Quantity
and Items
. If you don't want to require\n\t\t\tsigned URLs in requests for objects that match <code>PathPattern</code>
, specify\n\t\t\t\tfalse
for Enabled
and 0
for\n\t\t\t\tQuantity
. Omit Items
. To add, change, or remove one or\n\t\t\tmore trusted signers, change Enabled
to true
(if it's\n\t\t\tcurrently false
), change Quantity
as applicable, and specify\n\t\t\tall of the trusted signers that you want to include in the updated distribution.
For more information, see Serving Private\n\t\t\t\tContent through CloudFront in the Amazon CloudFront Developer Guide.
", "smithy.api#required": {} } }, @@ -14635,7 +14635,7 @@ "PriceClass": { "target": "com.amazonaws.cloudfront#PriceClass", "traits": { - "smithy.api#documentation": "A complex type that contains information about price class for this streaming\n\t\t\tdistribution.
", + "smithy.api#documentation": "A complex type that contains information about price class for this streaming\n\t\t\tdistribution.
", "smithy.api#required": {} } }, @@ -14648,7 +14648,7 @@ } }, "traits": { - "smithy.api#documentation": "A summary of the information for a CloudFront streaming distribution.
" + "smithy.api#documentation": "A summary of the information for a CloudFront streaming distribution.
" } }, "com.amazonaws.cloudfront#StreamingDistributionSummaryList": { @@ -14666,7 +14666,7 @@ "Enabled": { "target": "com.amazonaws.cloudfront#boolean", "traits": { - "smithy.api#documentation": "Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you don't\n\t\t\twant to enable logging when you create a streaming distribution or if you want to\n\t\t\tdisable logging for an existing streaming distribution, specify false
for\n\t\t\t\tEnabled
, and specify empty Bucket
and Prefix
\n\t\t\telements. If you specify false
for Enabled
but you specify\n\t\t\tvalues for Bucket
and Prefix
, the values are automatically\n\t\t\tdeleted.
Specifies whether you want CloudFront to save access logs to an Amazon S3 bucket. If you don't\n\t\t\twant to enable logging when you create a streaming distribution or if you want to\n\t\t\tdisable logging for an existing streaming distribution, specify false
for\n\t\t\t\tEnabled
, and specify empty Bucket
and Prefix
\n\t\t\telements. If you specify false
for Enabled
but you specify\n\t\t\tvalues for Bucket
and Prefix
, the values are automatically\n\t\t\tdeleted.
A string that contains Tag
key.
The string length should be between 1 and 128 characters. Valid characters include\n\t\t\t\ta-z
, A-Z
, 0-9
, space, and the special\n\t\t\tcharacters _ - . : / = + @
.
A string that contains Tag
key.
The string length should be between 1 and 128 characters. Valid characters include\n\t\t\t\ta-z
, A-Z
, 0-9
, space, and the special\n\t\t\tcharacters _ - . : / = + @
.
A string that contains an optional Tag
value.
The string length should be between 0 and 256 characters. Valid characters include\n\t\t\t\ta-z
, A-Z
, 0-9
, space, and the special\n\t\t\tcharacters _ - . : / = + @
.
A string that contains an optional Tag
value.
The string length should be between 0 and 256 characters. Valid characters include\n\t\t\t\ta-z
, A-Z
, 0-9
, space, and the special\n\t\t\tcharacters _ - . : / = + @
.
A complex type that contains Tag
key and Tag
value.
A complex type that contains Tag
key and Tag
value.
A string that contains Tag
key.
The string length should be between 1 and 128 characters. Valid characters include\n\t\t\t\ta-z
, A-Z
, 0-9
, space, and the special\n\t\t\tcharacters _ - . : / = + @
.
A string that contains Tag
key.
The string length should be between 1 and 128 characters. Valid characters include\n\t\t\t\ta-z
, A-Z
, 0-9
, space, and the special\n\t\t\tcharacters _ - . : / = + @
.
A complex type that contains Tag
key elements.
A complex type that contains Tag
key elements.
A complex type that contains zero or more Tag
elements.
A complex type that contains zero or more Tag
elements.
An ARN of a CloudFront resource.
", + "smithy.api#documentation": "An ARN of a CloudFront resource.
", "smithy.api#httpQuery": "Resource", "smithy.api#required": {} } @@ -14798,7 +14798,7 @@ "Tags": { "target": "com.amazonaws.cloudfront#Tags", "traits": { - "smithy.api#documentation": "A complex type that contains zero or more Tag
elements.
A complex type that contains zero or more Tag
elements.
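Illustrative only, not part of the synced model: a minimal boto3 sketch of a TagResource call carrying this Tags payload; the ARN and tag values below are placeholders.

import boto3

cloudfront = boto3.client("cloudfront")

# Tags carries a single "Items" list of Key/Value pairs, matching the Tags shape documented here.
cloudfront.tag_resource(
    Resource="arn:aws:cloudfront::123456789012:distribution/EXAMPLEID",  # placeholder ARN
    Tags={"Items": [{"Key": "team", "Value": "media"}]},
)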
The request to add tags to a CloudFront resource.
" + "smithy.api#documentation": "The request to add tags to a CloudFront resource.
" } }, "com.amazonaws.cloudfront#TagValue": { @@ -14825,12 +14825,12 @@ "Items": { "target": "com.amazonaws.cloudfront#TagList", "traits": { - "smithy.api#documentation": "A complex type that contains Tag
elements.
A complex type that contains Tag
elements.
A complex type that contains zero or more Tag
elements.
A complex type that contains zero or more Tag
elements.
Tests a CloudFront function.
\nTo test a function, you provide an event object that represents\n\t\t\tan HTTP request or response that your CloudFront distribution could receive in production.\n\t\t\tCloudFront runs the function, passing it the event object that you provided, and returns the\n\t\t\tfunction's result (the modified event object) in the response. The response also\n\t\t\tcontains function logs and error messages, if any exist. For more information about\n\t\t\ttesting functions, see Testing functions in the Amazon CloudFront Developer Guide.
\nTo test a function, you provide the function's name and version (ETag
\n\t\t\tvalue) along with the event object. To get the function's name and version, you can use\n\t\t\t\tListFunctions
and DescribeFunction
.
Tests a CloudFront function.
\n\t\tTo test a function, you provide an event object that represents\n\t\t\tan HTTP request or response that your CloudFront distribution could receive in production.\n\t\t\tCloudFront runs the function, passing it the event object that you provided, and returns the\n\t\t\tfunction’s result (the modified event object) in the response. The response also\n\t\t\tcontains function logs and error messages, if any exist. For more information about\n\t\t\ttesting functions, see Testing functions in the Amazon CloudFront Developer Guide.
\n\t\tTo test a function, you provide the function’s name and version (ETag
\n\t\t\tvalue) along with the event object. To get the function’s name and version, you can use\n\t\t\t\tListFunctions
and DescribeFunction
.
The length of the Content-Security-Policy
header value in the response\n\t\t\theaders policy exceeds the maximum.
For more information, see Quotas (formerly known as limits) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", + "smithy.api#documentation": "The length of the Content-Security-Policy
header value in the response\n\t\t\theaders policy exceeds the maximum.
For more information, see Quotas (formerly known as limits) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -15089,7 +15089,7 @@ } }, "traits": { - "smithy.api#documentation": "The number of custom headers in the response headers policy exceeds the\n\t\t\tmaximum.
\nFor more information, see Quotas (formerly known as limits) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", + "smithy.api#documentation": "The number of custom headers in the response headers policy exceeds the\n\t\t\tmaximum.
\n\t\tFor more information, see Quotas (formerly known as limits) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -15167,7 +15167,7 @@ } }, "traits": { - "smithy.api#documentation": "The maximum number of distributions have been associated with the specified origin\n\t\t\taccess control.
\nFor more information, see Quotas (formerly known as limits) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", + "smithy.api#documentation": "The maximum number of distributions have been associated with the specified origin\n\t\t\taccess control.
\n\t\tFor more information, see Quotas (formerly known as limits) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -15193,7 +15193,7 @@ } }, "traits": { - "smithy.api#documentation": "The maximum number of distributions have been associated with the specified response\n\t\t\theaders policy.
\nFor more information, see Quotas (formerly known as limits) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", + "smithy.api#documentation": "The maximum number of distributions have been associated with the specified response\n\t\t\theaders policy.
\n\t\tFor more information, see Quotas (formerly known as limits) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -15440,7 +15440,7 @@ } }, "traits": { - "smithy.api#documentation": "The number of origin access controls in your Amazon Web Services account exceeds the maximum\n\t\t\tallowed.
\nFor more information, see Quotas (formerly known as limits) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", + "smithy.api#documentation": "The number of origin access controls in your Amazon Web Services account exceeds the maximum\n\t\t\tallowed.
\n\t\tFor more information, see Quotas (formerly known as limits) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -15583,7 +15583,7 @@ } }, "traits": { - "smithy.api#documentation": "You have reached the maximum number of response headers policies for this\n\t\t\tAmazon Web Services account.
\nFor more information, see Quotas (formerly known as limits) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", + "smithy.api#documentation": "You have reached the maximum number of response headers policies for this\n\t\t\tAmazon Web Services account.
\n\t\tFor more information, see Quotas (formerly known as limits) in the\n\t\t\t\tAmazon CloudFront Developer Guide.
", "smithy.api#error": "client", "smithy.api#httpError": 400 } @@ -15795,7 +15795,7 @@ "Resource": { "target": "com.amazonaws.cloudfront#ResourceARN", "traits": { - "smithy.api#documentation": "An ARN of a CloudFront resource.
", + "smithy.api#documentation": "An ARN of a CloudFront resource.
", "smithy.api#httpQuery": "Resource", "smithy.api#required": {} } @@ -15803,7 +15803,7 @@ "TagKeys": { "target": "com.amazonaws.cloudfront#TagKeys", "traits": { - "smithy.api#documentation": "A complex type that contains zero or more Tag
key elements.
A complex type that contains zero or more Tag
key elements.
The request to remove tags from a CloudFront resource.
" + "smithy.api#documentation": "The request to remove tags from a CloudFront resource.
" } }, "com.amazonaws.cloudfront#UpdateCachePolicy": { @@ -15858,7 +15858,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates a cache policy configuration.
\nWhen you update a cache policy configuration, all the fields are updated with the\n\t\t\tvalues provided in the request. You cannot update some fields independent of others. To\n\t\t\tupdate a cache policy configuration:
\nUse GetCachePolicyConfig
to get the current configuration.
Locally modify the fields in the cache policy configuration that you want to\n\t\t\t\t\tupdate.
\nCall UpdateCachePolicy
by providing the entire cache policy\n\t\t\t\t\tconfiguration, including the fields that you modified and those that you\n\t\t\t\t\tdidn't.
Updates a cache policy configuration.
\n\t\tWhen you update a cache policy configuration, all the fields are updated with the\n\t\t\tvalues provided in the request. You cannot update some fields independent of others. To\n\t\t\tupdate a cache policy configuration:
\n\t\tUse GetCachePolicyConfig
to get the current configuration.
Locally modify the fields in the cache policy configuration that you want to\n\t\t\t\t\tupdate.
\n\t\t\tCall UpdateCachePolicy
by providing the entire cache policy\n\t\t\t\t\tconfiguration, including the fields that you modified and those that you\n\t\t\t\t\tdidn’t.
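Illustrative only, not part of the synced model: the read-modify-write sequence above, sketched with boto3; the policy ID is a placeholder.

import boto3

cloudfront = boto3.client("cloudfront")
policy_id = "example-cache-policy-id"  # placeholder

# 1. Get the current configuration and its ETag.
current = cloudfront.get_cache_policy_config(Id=policy_id)

# 2. Locally modify only the fields you want to change.
config = current["CachePolicyConfig"]
config["Comment"] = "updated comment"

# 3. Send back the entire configuration, quoting the ETag as IfMatch.
cloudfront.update_cache_policy(Id=policy_id, IfMatch=current["ETag"], CachePolicyConfig=config)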
The unique identifier for the cache policy that you are updating. The identifier is\n\t\t\treturned in a cache behavior's CachePolicyId
field in the response to\n\t\t\t\tGetDistributionConfig
.
The unique identifier for the cache policy that you are updating. The identifier is\n\t\t\treturned in a cache behavior’s CachePolicyId
field in the response to\n\t\t\t\tGetDistributionConfig
.
The version of the cache policy that you are updating. The version is returned in the\n\t\t\tcache policy's ETag
field in the response to\n\t\t\t\tGetCachePolicyConfig
.
The version of the cache policy that you are updating. The version is returned in the\n\t\t\tcache policy’s ETag
field in the response to\n\t\t\t\tGetCachePolicyConfig
.
Update an origin access identity.
", + "smithy.api#documentation": "Update an origin access identity.
", "smithy.api#http": { "method": "PUT", "uri": "/2020-05-31/origin-access-identity/cloudfront/{Id}/config", @@ -16043,7 +16043,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates a continuous deployment policy. You can update a continuous deployment policy\n\t\t\tto enable or disable it, to change the percentage of traffic that it sends to the\n\t\t\tstaging distribution, or to change the staging distribution that it sends traffic\n\t\t\tto.
\nWhen you update a continuous deployment policy configuration, all the fields are\n\t\t\tupdated with the values that are provided in the request. You cannot update some fields\n\t\t\tindependent of others. To update a continuous deployment policy configuration:
\nUse GetContinuousDeploymentPolicyConfig
to get the current\n\t\t\t\t\tconfiguration.
Locally modify the fields in the continuous deployment policy configuration\n\t\t\t\t\tthat you want to update.
\nUse UpdateContinuousDeploymentPolicy
, providing the entire\n\t\t\t\t\tcontinuous deployment policy configuration, including the fields that you\n\t\t\t\t\tmodified and those that you didn't.
Updates a continuous deployment policy. You can update a continuous deployment policy\n\t\t\tto enable or disable it, to change the percentage of traffic that it sends to the\n\t\t\tstaging distribution, or to change the staging distribution that it sends traffic\n\t\t\tto.
\n\t\tWhen you update a continuous deployment policy configuration, all the fields are\n\t\t\tupdated with the values that are provided in the request. You cannot update some fields\n\t\t\tindependent of others. To update a continuous deployment policy configuration:
\n\t\tUse GetContinuousDeploymentPolicyConfig
to get the current\n\t\t\t\t\tconfiguration.
Locally modify the fields in the continuous deployment policy configuration\n\t\t\t\t\tthat you want to update.
\n\t\t\tUse UpdateContinuousDeploymentPolicy
, providing the entire\n\t\t\t\t\tcontinuous deployment policy configuration, including the fields that you\n\t\t\t\t\tmodified and those that you didn’t.
Updates the configuration for a CloudFront distribution.
\nThe update process includes getting the current distribution configuration, updating\n\t\t\tit to make your changes, and then submitting an UpdateDistribution
request\n\t\t\tto make the updates.
\n To update a web distribution using the CloudFront\n\t\t\tAPI\n
\nUse GetDistributionConfig
to get the current configuration,\n\t\t\t\t\tincluding the version identifier (ETag
).
Update the distribution configuration that was returned in the response. Note\n\t\t\t\t\tthe following important requirements and restrictions:
\nYou must rename the ETag
field to IfMatch
,\n\t\t\t\t\t\t\tleaving the value unchanged. (Set the value of IfMatch
to\n\t\t\t\t\t\t\tthe value of ETag
, then remove the ETag
\n\t\t\t\t\t\t\tfield.)
You can't change the value of CallerReference
.
Submit an UpdateDistribution
request, providing the distribution\n\t\t\t\t\tconfiguration. The new configuration replaces the existing configuration. The\n\t\t\t\t\tvalues that you specify in an UpdateDistribution
request are not\n\t\t\t\t\tmerged into your existing configuration. Make sure to include all fields: the\n\t\t\t\t\tones that you modified and also the ones that you didn't.
Updates the configuration for a CloudFront distribution.
\n\t\tThe update process includes getting the current distribution configuration, updating\n\t\t\tit to make your changes, and then submitting an UpdateDistribution
request\n\t\t\tto make the updates.
\n To update a web distribution using the CloudFront\n\t\t\tAPI\n
\n\t\tUse GetDistributionConfig
to get the current configuration,\n\t\t\t\t\tincluding the version identifier (ETag
).
Update the distribution configuration that was returned in the response. Note\n\t\t\t\t\tthe following important requirements and restrictions:
\n\t\t\t\tYou must rename the ETag
field to IfMatch
,\n\t\t\t\t\t\t\tleaving the value unchanged. (Set the value of IfMatch
to\n\t\t\t\t\t\t\tthe value of ETag
, then remove the ETag
\n\t\t\t\t\t\t\tfield.)
You can’t change the value of CallerReference
.
Submit an UpdateDistribution
request, providing the distribution\n\t\t\t\t\tconfiguration. The new configuration replaces the existing configuration. The\n\t\t\t\t\tvalues that you specify in an UpdateDistribution
request are not\n\t\t\t\t\tmerged into your existing configuration. Make sure to include all fields: the\n\t\t\t\t\tones that you modified and also the ones that you didn’t.
Copies the staging distribution's configuration to its corresponding primary\n\t\t\tdistribution. The primary distribution retains its Aliases
(also known as\n\t\t\talternate domain names or CNAMEs) and ContinuousDeploymentPolicyId
value,\n\t\t\tbut otherwise its configuration is overwritten to match the staging distribution.
You can use this operation in a continuous deployment workflow after you have tested\n\t\t\tconfiguration changes on the staging distribution. After using a continuous deployment\n\t\t\tpolicy to move a portion of your domain name's traffic to the staging distribution and\n\t\t\tverifying that it works as intended, you can use this operation to copy the staging\n\t\t\tdistribution's configuration to the primary distribution. This action will disable the\n\t\t\tcontinuous deployment policy and move your domain's traffic back to the primary distribution.
", + "smithy.api#documentation": "Copies the staging distribution's configuration to its corresponding primary\n\t\t\tdistribution. The primary distribution retains its Aliases
(also known as\n\t\t\talternate domain names or CNAMEs) and ContinuousDeploymentPolicyId
value,\n\t\t\tbut otherwise its configuration is overwritten to match the staging distribution.
You can use this operation in a continuous deployment workflow after you have tested\n\t\t\tconfiguration changes on the staging distribution. After using a continuous deployment\n\t\t\tpolicy to move a portion of your domain name’s traffic to the staging distribution and\n\t\t\tverifying that it works as intended, you can use this operation to copy the staging\n\t\t\tdistribution’s configuration to the primary distribution. This action will disable the\n\t\t\tcontinuous deployment policy and move your domain’s traffic back to the primary distribution.
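Illustrative only, not part of the synced model: a boto3 sketch of the promotion step described above. The distribution IDs are placeholders, and the comma-separated IfMatch pair is an assumption based on the format documented on the request's IfMatch member.

import boto3

cloudfront = boto3.client("cloudfront")
primary_id = "E1PRIMARYEXAMPLE"  # placeholder
staging_id = "E2STAGINGEXAMPLE"  # placeholder

# Both current ETag values are needed so CloudFront can verify that neither
# distribution has changed since it was last read.
primary_etag = cloudfront.get_distribution(Id=primary_id)["ETag"]
staging_etag = cloudfront.get_distribution(Id=staging_id)["ETag"]

# Copy the staging configuration onto the primary distribution and disable the
# continuous deployment policy.
cloudfront.update_distribution_with_staging_config(
    Id=primary_id,
    StagingDistributionId=staging_id,
    IfMatch=f"{primary_etag}, {staging_etag}",
)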
", "smithy.api#http": { "method": "PUT", "uri": "/2020-05-31/distribution/{Id}/promote-staging-config", @@ -16587,7 +16587,7 @@ "IfMatch": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The current versions (ETag
values) of both primary and staging distributions.\n\t\t\tProvide these in the following format:
\n
\n
The current versions (ETag
values) of both primary and staging distributions.\n\t\t\tProvide these in the following format:
\n
\n
The current version of the primary distribution (after it's updated).
", + "smithy.api#documentation": "The current version of the primary distribution (after it’s updated).
", "smithy.api#httpHeader": "ETag" } } @@ -16655,7 +16655,7 @@ } ], "traits": { - "smithy.api#documentation": "Update a field-level encryption configuration.
", + "smithy.api#documentation": "Update a field-level encryption configuration.
", "smithy.api#http": { "method": "PUT", "uri": "/2020-05-31/field-level-encryption/{Id}/config", @@ -16669,7 +16669,7 @@ "FieldLevelEncryptionConfig": { "target": "com.amazonaws.cloudfront#FieldLevelEncryptionConfig", "traits": { - "smithy.api#documentation": "Request to update a field-level encryption configuration.
", + "smithy.api#documentation": "Request to update a field-level encryption configuration.
", "smithy.api#httpPayload": {}, "smithy.api#required": {}, "smithy.api#xmlName": "FieldLevelEncryptionConfig" @@ -16758,7 +16758,7 @@ } ], "traits": { - "smithy.api#documentation": "Update a field-level encryption profile.
", + "smithy.api#documentation": "Update a field-level encryption profile.
", "smithy.api#http": { "method": "PUT", "uri": "/2020-05-31/field-level-encryption-profile/{Id}/config", @@ -16772,7 +16772,7 @@ "FieldLevelEncryptionProfileConfig": { "target": "com.amazonaws.cloudfront#FieldLevelEncryptionProfileConfig", "traits": { - "smithy.api#documentation": "Request to update a field-level encryption profile.
", + "smithy.api#documentation": "Request to update a field-level encryption profile.
", "smithy.api#httpPayload": {}, "smithy.api#required": {}, "smithy.api#xmlName": "FieldLevelEncryptionProfileConfig" @@ -16781,7 +16781,7 @@ "Id": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The ID of the field-level encryption profile request.
", + "smithy.api#documentation": "The ID of the field-level encryption profile request.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -16808,7 +16808,7 @@ "ETag": { "target": "com.amazonaws.cloudfront#string", "traits": { - "smithy.api#documentation": "The result of the field-level encryption profile request.
", + "smithy.api#documentation": "The result of the field-level encryption profile request.
", "smithy.api#httpHeader": "ETag" } } @@ -16843,7 +16843,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates a CloudFront function.
\nYou can update a function's code or the comment that describes the function. You\n\t\t\tcannot update a function's name.
\nTo update a function, you provide the function's name and version (ETag
\n\t\t\tvalue) along with the updated function code. To get the name and version, you can use\n\t\t\t\tListFunctions
and DescribeFunction
.
Updates a CloudFront function.
\n\t\tYou can update a function’s code or the comment that describes the function. You\n\t\t\tcannot update a function’s name.
\n\t\tTo update a function, you provide the function’s name and version (ETag
\n\t\t\tvalue) along with the updated function code. To get the name and version, you can use\n\t\t\t\tListFunctions
and DescribeFunction
.
Updates a key group.
\nWhen you update a key group, all the fields are updated with the values provided in\n\t\t\tthe request. You cannot update some fields independent of others. To update a key\n\t\t\tgroup:
\nGet the current key group with GetKeyGroup
or\n\t\t\t\t\t\tGetKeyGroupConfig
.
Locally modify the fields in the key group that you want to update. For\n\t\t\t\t\texample, add or remove public key IDs.
\nCall UpdateKeyGroup
with the entire key group object, including\n\t\t\t\t\tthe fields that you modified and those that you didn't.
Updates a key group.
\n\t\tWhen you update a key group, all the fields are updated with the values provided in\n\t\t\tthe request. You cannot update some fields independent of others. To update a key\n\t\t\tgroup:
\n\t\tGet the current key group with GetKeyGroup
or\n\t\t\t\t\t\tGetKeyGroupConfig
.
Locally modify the fields in the key group that you want to update. For\n\t\t\t\t\texample, add or remove public key IDs.
\n\t\t\tCall UpdateKeyGroup
with the entire key group object, including\n\t\t\t\t\tthe fields that you modified and those that you didn’t.
The version of the key group that you are updating. The version is the key group's\n\t\t\t\tETag
value.
The version of the key group that you are updating. The version is the key group’s\n\t\t\t\tETag
value.
Updates an origin request policy configuration.
\nWhen you update an origin request policy configuration, all the fields are updated\n\t\t\twith the values provided in the request. You cannot update some fields independent of\n\t\t\tothers. To update an origin request policy configuration:
\nUse GetOriginRequestPolicyConfig
to get the current\n\t\t\t\t\tconfiguration.
Locally modify the fields in the origin request policy configuration that you\n\t\t\t\t\twant to update.
\nCall UpdateOriginRequestPolicy
by providing the entire origin\n\t\t\t\t\trequest policy configuration, including the fields that you modified and those\n\t\t\t\t\tthat you didn't.
Updates an origin request policy configuration.
\n\t\tWhen you update an origin request policy configuration, all the fields are updated\n\t\t\twith the values provided in the request. You cannot update some fields independent of\n\t\t\tothers. To update an origin request policy configuration:
\n\t\tUse GetOriginRequestPolicyConfig
to get the current\n\t\t\t\t\tconfiguration.
Locally modify the fields in the origin request policy configuration that you\n\t\t\t\t\twant to update.
\n\t\t\tCall UpdateOriginRequestPolicy
by providing the entire origin\n\t\t\t\t\trequest policy configuration, including the fields that you modified and those\n\t\t\t\t\tthat you didn’t.
The unique identifier for the origin request policy that you are updating. The\n\t\t\tidentifier is returned in a cache behavior's OriginRequestPolicyId
field in\n\t\t\tthe response to GetDistributionConfig
.
The unique identifier for the origin request policy that you are updating. The\n\t\t\tidentifier is returned in a cache behavior’s OriginRequestPolicyId
field in\n\t\t\tthe response to GetDistributionConfig
.
The version of the origin request policy that you are updating. The version is\n\t\t\treturned in the origin request policy's ETag
field in the response to\n\t\t\t\tGetOriginRequestPolicyConfig
.
The version of the origin request policy that you are updating. The version is\n\t\t\treturned in the origin request policy’s ETag
field in the response to\n\t\t\t\tGetOriginRequestPolicyConfig
.
Updates a real-time log configuration.
\nWhen you update a real-time log configuration, all the parameters are updated with the\n\t\t\tvalues provided in the request. You cannot update some parameters independent of others.\n\t\t\tTo update a real-time log configuration:
\nCall GetRealtimeLogConfig
to get the current real-time log\n\t\t\t\t\tconfiguration.
Locally modify the parameters in the real-time log configuration that you want\n\t\t\t\t\tto update.
\nCall this API (UpdateRealtimeLogConfig
) by providing the entire\n\t\t\t\t\treal-time log configuration, including the parameters that you modified and\n\t\t\t\t\tthose that you didn't.
You cannot update a real-time log configuration's Name
or\n\t\t\t\tARN
.
Updates a real-time log configuration.
\n\t\tWhen you update a real-time log configuration, all the parameters are updated with the\n\t\t\tvalues provided in the request. You cannot update some parameters independent of others.\n\t\t\tTo update a real-time log configuration:
\n\t\tCall GetRealtimeLogConfig
to get the current real-time log\n\t\t\t\t\tconfiguration.
Locally modify the parameters in the real-time log configuration that you want\n\t\t\t\t\tto update.
\n\t\t\tCall this API (UpdateRealtimeLogConfig
) by providing the entire\n\t\t\t\t\treal-time log configuration, including the parameters that you modified and\n\t\t\t\t\tthose that you didn’t.
You cannot update a real-time log configuration’s Name
or\n\t\t\t\tARN
.
A list of fields to include in each real-time log record.
\nFor more information about fields, see Real-time log configuration fields in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" + "smithy.api#documentation": "A list of fields to include in each real-time log record.
\n\t\tFor more information about fields, see Real-time log configuration fields in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" } }, "Name": { @@ -17381,7 +17381,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates a response headers policy.
\nWhen you update a response headers policy, the entire policy is replaced. You cannot\n\t\t\tupdate some policy fields independent of others. To update a response headers policy\n\t\t\tconfiguration:
\nUse GetResponseHeadersPolicyConfig
to get the current policy's\n\t\t\t\t\tconfiguration.
Modify the fields in the response headers policy configuration that you want\n\t\t\t\t\tto update.
\nCall UpdateResponseHeadersPolicy
, providing the entire response\n\t\t\t\t\theaders policy configuration, including the fields that you modified and those\n\t\t\t\t\tthat you didn't.
Updates a response headers policy.
\n\t\tWhen you update a response headers policy, the entire policy is replaced. You cannot\n\t\t\tupdate some policy fields independent of others. To update a response headers policy\n\t\t\tconfiguration:
\n\t\tUse GetResponseHeadersPolicyConfig
to get the current policy’s\n\t\t\t\t\tconfiguration.
Modify the fields in the response headers policy configuration that you want\n\t\t\t\t\tto update.
\n\t\t\tCall UpdateResponseHeadersPolicy
, providing the entire response\n\t\t\t\t\theaders policy configuration, including the fields that you modified and those\n\t\t\t\t\tthat you didn’t.
The version of the response headers policy that you are updating.
\nThe version is returned in the cache policy's ETag
field in the response\n\t\t\tto GetResponseHeadersPolicyConfig
.
The version of the response headers policy that you are updating.
\n\t\tThe version is returned in the cache policy’s ETag
field in the response\n\t\t\tto GetResponseHeadersPolicyConfig
.
Update a streaming distribution.
", + "smithy.api#documentation": "Update a streaming distribution.
", "smithy.api#http": { "method": "PUT", "uri": "/2020-05-31/streaming-distribution/{Id}/config", @@ -17558,50 +17558,50 @@ "CloudFrontDefaultCertificate": { "target": "com.amazonaws.cloudfront#boolean", "traits": { - "smithy.api#documentation": "If the distribution uses the CloudFront domain name such as\n\t\t\t\td111111abcdef8.cloudfront.net
, set this field to\n\t\t\ttrue
.
If the distribution uses Aliases
(alternate domain names or CNAMEs), set\n\t\t\tthis field to false
and specify values for the following fields:
\n ACMCertificateArn
or IAMCertificateId
(specify a\n\t\t\t\t\tvalue for one, not both)
\n MinimumProtocolVersion
\n
\n SSLSupportMethod
\n
If the distribution uses the CloudFront domain name such as\n\t\t\t\td111111abcdef8.cloudfront.net
, set this field to\n\t\t\ttrue
.
If the distribution uses Aliases
(alternate domain names or CNAMEs), set\n\t\t\tthis field to false
and specify values for the following fields:
\n ACMCertificateArn
or IAMCertificateId
(specify a\n\t\t\t\t\tvalue for one, not both)
\n MinimumProtocolVersion
\n
\n SSLSupportMethod
\n
If the distribution uses Aliases
(alternate domain names or CNAMEs) and\n\t\t\tthe SSL/TLS certificate is stored in Identity and Access Management (IAM), provide the ID of the IAM certificate.
If you specify an IAM certificate ID, you must also specify values for\n\t\t\t\tMinimumProtocolVersion
and SSLSupportMethod
.
If the distribution uses Aliases
(alternate domain names or CNAMEs) and\n\t\t\tthe SSL/TLS certificate is stored in Identity and Access Management (IAM), provide the ID of the IAM certificate.
If you specify an IAM certificate ID, you must also specify values for\n\t\t\t\tMinimumProtocolVersion
and SSLSupportMethod
.
If the distribution uses Aliases
(alternate domain names or CNAMEs) and\n\t\t\tthe SSL/TLS certificate is stored in Certificate Manager (ACM), provide the Amazon Resource Name\n\t\t\t(ARN) of the ACM certificate. CloudFront only supports ACM certificates in the US East\n\t\t\t(N. Virginia) Region (us-east-1
).
If you specify an ACM certificate ARN, you must also specify values for\n\t\t\t\tMinimumProtocolVersion
and SSLSupportMethod
.
If the distribution uses Aliases
(alternate domain names or CNAMEs) and\n\t\t\tthe SSL/TLS certificate is stored in Certificate Manager (ACM), provide the Amazon Resource Name\n\t\t\t(ARN) of the ACM certificate. CloudFront only supports ACM certificates in the US East\n\t\t\t(N. Virginia) Region (us-east-1
).
If you specify an ACM certificate ARN, you must also specify values for\n\t\t\t\tMinimumProtocolVersion
and SSLSupportMethod
.
If the distribution uses Aliases
(alternate domain names or CNAMEs),\n\t\t\tspecify which viewers the distribution accepts HTTPS connections from.
\n sni-only
– The distribution accepts HTTPS connections from only\n\t\t\t\t\tviewers that support server name\n\t\t\t\t\t\tindication (SNI). This is recommended. Most browsers and clients\n\t\t\t\t\tsupport SNI.
\n vip
– The distribution accepts HTTPS connections from all viewers\n\t\t\t\t\tincluding those that don't support SNI. This is not recommended, and results in\n\t\t\t\t\tadditional monthly charges from CloudFront.
\n static-ip
- Do not specify this value unless your distribution\n\t\t\t\t\thas been enabled for this feature by the CloudFront team. If you have a use case\n\t\t\t\t\tthat requires static IP addresses for a distribution, contact CloudFront through\n\t\t\t\t\tthe Amazon Web Services Support Center.
If the distribution uses the CloudFront domain name such as\n\t\t\t\td111111abcdef8.cloudfront.net
, don't set a value for this field.
If the distribution uses Aliases
(alternate domain names or CNAMEs),\n\t\t\tspecify which viewers the distribution accepts HTTPS connections from.
\n sni-only
– The distribution accepts HTTPS connections from only\n\t\t\t\t\tviewers that support server name\n\t\t\t\t\t\tindication (SNI). This is recommended. Most browsers and clients\n\t\t\t\t\tsupport SNI.
\n vip
– The distribution accepts HTTPS connections from all viewers\n\t\t\t\t\tincluding those that don’t support SNI. This is not recommended, and results in\n\t\t\t\t\tadditional monthly charges from CloudFront.
\n static-ip
- Do not specify this value unless your distribution\n\t\t\t\t\thas been enabled for this feature by the CloudFront team. If you have a use case\n\t\t\t\t\tthat requires static IP addresses for a distribution, contact CloudFront through\n\t\t\t\t\tthe Amazon Web Services Support Center.
If the distribution uses the CloudFront domain name such as\n\t\t\t\td111111abcdef8.cloudfront.net
, don’t set a value for this field.
If the distribution uses Aliases
(alternate domain names or CNAMEs),\n\t\t\tspecify the security policy that you want CloudFront to use for HTTPS connections with\n\t\t\tviewers. The security policy determines two settings:
The minimum SSL/TLS protocol that CloudFront can use to communicate with\n\t\t\t\t\tviewers.
\nThe ciphers that CloudFront can use to encrypt the content that it returns to\n\t\t\t\t\tviewers.
\nFor more information, see Security Policy and Supported Protocols and Ciphers Between Viewers and\n\t\t\t\tCloudFront in the Amazon CloudFront Developer Guide.
\nOn the CloudFront console, this setting is called Security\n\t\t\t\t\tPolicy.
\nWhen you're using SNI only (you set SSLSupportMethod
to\n\t\t\t\tsni-only
), you must specify TLSv1
or higher.
If the distribution uses the CloudFront domain name such as\n\t\t\t\td111111abcdef8.cloudfront.net
(you set\n\t\t\t\tCloudFrontDefaultCertificate
to true
), CloudFront automatically\n\t\t\tsets the security policy to TLSv1
regardless of the value that you set\n\t\t\there.
If the distribution uses Aliases
(alternate domain names or CNAMEs),\n\t\t\tspecify the security policy that you want CloudFront to use for HTTPS connections with\n\t\t\tviewers. The security policy determines two settings:
The minimum SSL/TLS protocol that CloudFront can use to communicate with\n\t\t\t\t\tviewers.
\n\t\t\tThe ciphers that CloudFront can use to encrypt the content that it returns to\n\t\t\t\t\tviewers.
\n\t\t\tFor more information, see Security Policy and Supported Protocols and Ciphers Between Viewers and\n\t\t\t\tCloudFront in the Amazon CloudFront Developer Guide.
\n\t\tOn the CloudFront console, this setting is called Security\n\t\t\t\t\tPolicy.
\n\t\tWhen you’re using SNI only (you set SSLSupportMethod
to\n\t\t\t\tsni-only
), you must specify TLSv1
or higher.
If the distribution uses the CloudFront domain name such as\n\t\t\t\td111111abcdef8.cloudfront.net
(you set\n\t\t\t\tCloudFrontDefaultCertificate
to true
), CloudFront automatically\n\t\t\tsets the security policy to TLSv1
regardless of the value that you set\n\t\t\there.
This field is deprecated. Use one of the following fields instead:
\n\n ACMCertificateArn
\n
\n IAMCertificateId
\n
\n CloudFrontDefaultCertificate
\n
This field is deprecated. Use one of the following fields instead:
\n\t\t\n ACMCertificateArn
\n
\n IAMCertificateId
\n
\n CloudFrontDefaultCertificate
\n
This field is deprecated. Use one of the following fields instead:
\n\n ACMCertificateArn
\n
\n IAMCertificateId
\n
\n CloudFrontDefaultCertificate
\n
This field is deprecated. Use one of the following fields instead:
\n\t\t\n ACMCertificateArn
\n
\n IAMCertificateId
\n
\n CloudFrontDefaultCertificate
\n
A complex type that determines the distribution's SSL/TLS configuration for\n\t\t\tcommunicating with viewers.
\nIf the distribution doesn't use Aliases
(also known as alternate domain\n\t\t\tnames or CNAMEs)—that is, if the distribution uses the CloudFront domain name such as\n\t\t\t\td111111abcdef8.cloudfront.net
—set\n\t\t\t\tCloudFrontDefaultCertificate
to true
and leave all other\n\t\t\tfields empty.
If the distribution uses Aliases
(alternate domain names or CNAMEs), use\n\t\t\tthe fields in this type to specify the following settings:
Which viewers the distribution accepts HTTPS connections from: only viewers\n\t\t\t\t\tthat support server name indication (SNI) (recommended), or all viewers\n\t\t\t\t\tincluding those that don't support SNI.
\nTo accept HTTPS connections from only viewers that support SNI, set\n\t\t\t\t\t\t\t\tSSLSupportMethod
to sni-only
. This is\n\t\t\t\t\t\t\trecommended. Most browsers and clients support SNI.
To accept HTTPS connections from all viewers, including those that\n\t\t\t\t\t\t\tdon't support SNI, set SSLSupportMethod
to\n\t\t\t\t\t\t\tvip
. This is not recommended, and results in additional\n\t\t\t\t\t\t\tmonthly charges from CloudFront.
The minimum SSL/TLS protocol version that the distribution can use to\n\t\t\t\t\tcommunicate with viewers. To specify a minimum version, choose a value for\n\t\t\t\t\t\tMinimumProtocolVersion
. For more information, see Security Policy in the\n\t\t\t\t\t\tAmazon CloudFront Developer Guide.
The location of the SSL/TLS certificate, Certificate Manager\n\t\t\t\t\t\t(ACM) (recommended) or Identity and Access Management (IAM). You specify the location by\n\t\t\t\t\tsetting a value in one of the following fields (not both):
\n\n ACMCertificateArn
\n
\n IAMCertificateId
\n
All distributions support HTTPS connections from viewers. To require viewers to use\n\t\t\tHTTPS only, or to redirect them from HTTP to HTTPS, use\n\t\t\t\tViewerProtocolPolicy
in the CacheBehavior
or\n\t\t\t\tDefaultCacheBehavior
. To specify how CloudFront should use SSL/TLS to\n\t\t\tcommunicate with your custom origin, use CustomOriginConfig
.
For more information, see Using HTTPS with CloudFront and Using Alternate Domain Names and HTTPS in the\n\t\t\t\tAmazon CloudFront Developer Guide.
" + "smithy.api#documentation": "A complex type that determines the distribution’s SSL/TLS configuration for\n\t\t\tcommunicating with viewers.
\n\t\tIf the distribution doesn’t use Aliases
(also known as alternate domain\n\t\t\tnames or CNAMEs)—that is, if the distribution uses the CloudFront domain name such as\n\t\t\t\td111111abcdef8.cloudfront.net
—set\n\t\t\t\tCloudFrontDefaultCertificate
to true
and leave all other\n\t\t\tfields empty.
If the distribution uses Aliases
(alternate domain names or CNAMEs), use\n\t\t\tthe fields in this type to specify the following settings:
Which viewers the distribution accepts HTTPS connections from: only viewers\n\t\t\t\t\tthat support server name indication (SNI) (recommended), or all viewers\n\t\t\t\t\tincluding those that don’t support SNI.
\n\t\t\t\tTo accept HTTPS connections from only viewers that support SNI, set\n\t\t\t\t\t\t\t\tSSLSupportMethod
to sni-only
. This is\n\t\t\t\t\t\t\trecommended. Most browsers and clients support SNI.
To accept HTTPS connections from all viewers, including those that\n\t\t\t\t\t\t\tdon’t support SNI, set SSLSupportMethod
to\n\t\t\t\t\t\t\tvip
. This is not recommended, and results in additional\n\t\t\t\t\t\t\tmonthly charges from CloudFront.
The minimum SSL/TLS protocol version that the distribution can use to\n\t\t\t\t\tcommunicate with viewers. To specify a minimum version, choose a value for\n\t\t\t\t\t\tMinimumProtocolVersion
. For more information, see Security Policy in the\n\t\t\t\t\t\tAmazon CloudFront Developer Guide.
The location of the SSL/TLS certificate, Certificate Manager\n\t\t\t\t\t\t(ACM) (recommended) or Identity and Access Management (IAM). You specify the location by\n\t\t\t\t\tsetting a value in one of the following fields (not both):
\n\t\t\t\t\n ACMCertificateArn
\n
\n IAMCertificateId
\n
All distributions support HTTPS connections from viewers. To require viewers to use\n\t\t\tHTTPS only, or to redirect them from HTTP to HTTPS, use\n\t\t\t\tViewerProtocolPolicy
in the CacheBehavior
or\n\t\t\t\tDefaultCacheBehavior
. To specify how CloudFront should use SSL/TLS to\n\t\t\tcommunicate with your custom origin, use CustomOriginConfig
.
For more information, see Using HTTPS with CloudFront and Using Alternate Domain Names and HTTPS in the\n\t\t\t\tAmazon CloudFront Developer Guide.
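Illustrative only, not part of the synced model: the two common ViewerCertificate shapes this documentation describes, written as the dicts a boto3 DistributionConfig would carry; the ACM certificate ARN is a placeholder.

# Distribution with Aliases: ACM certificate (must be in us-east-1), SNI-only
# viewers, and a minimum TLS security policy.
viewer_certificate_with_aliases = {
    "ACMCertificateArn": "arn:aws:acm:us-east-1:123456789012:certificate/example",  # placeholder
    "SSLSupportMethod": "sni-only",
    "MinimumProtocolVersion": "TLSv1.2_2021",
}

# Distribution that uses only the default *.cloudfront.net domain name.
viewer_certificate_default = {"CloudFrontDefaultCertificate": True}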
" } }, "com.amazonaws.cloudfront#ViewerProtocolPolicy": { diff --git a/codegen/sdk/aws-models/codecatalyst.json b/codegen/sdk/aws-models/codecatalyst.json index 4910cba4fe7..515bd2b4006 100644 --- a/codegen/sdk/aws-models/codecatalyst.json +++ b/codegen/sdk/aws-models/codecatalyst.json @@ -5516,4 +5516,4 @@ } } } -} +} \ No newline at end of file diff --git a/codegen/sdk/aws-models/connect.json b/codegen/sdk/aws-models/connect.json index 6f541f1c0e5..2582e7caa87 100644 --- a/codegen/sdk/aws-models/connect.json +++ b/codegen/sdk/aws-models/connect.json @@ -970,7 +970,7 @@ "name": "connect" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "Amazon Connect is a cloud-based contact center solution that you use to set up and\n manage a customer contact center and provide reliable customer engagement at any scale.
\nAmazon Connect provides metrics and real-time reporting that enable you to optimize\n contact routing. You can also resolve customer issues more efficiently by getting customers in\n touch with the appropriate agents.
\nThere are limits to the number of Amazon Connect resources that you can create. There\n are also limits to the number of requests that you can make per second. For more information, see\n Amazon Connect Service Quotas in the Amazon Connect Administrator\n Guide.
\nYou can connect programmatically to an Amazon Web Services service by using an endpoint. For\n a list of Amazon Connect endpoints, see Amazon Connect Endpoints.
", + "smithy.api#documentation": "Amazon Connect is a cloud-based contact center solution that you use to set up and\n manage a customer contact center and provide reliable customer engagement at any scale.
\nAmazon Connect provides metrics and real-time reporting that enable you to optimize\n contact routing. You can also resolve customer issues more efficiently by getting customers in\n touch with the appropriate agents.
\nThere are limits to the number of Amazon Connect resources that you can create. There\n are also limits to the number of requests that you can make per second. For more information, see\n Amazon Connect Service Quotas in the Amazon Connect Administrator\n Guide.
\nYou can connect programmatically to an Amazon Web Services service by using an endpoint. For\n a list of Amazon Connect endpoints, see Amazon Connect Endpoints.
\nWorking with flows? Check out the Amazon Connect Flow\n language.
\nA unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If not provided, the Amazon Web Services\n SDK populates this field. For more information about idempotency, see\n Making retries safe with idempotent APIs.
\nPattern: ^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$
\n
A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If not provided, the Amazon Web Services\n SDK populates this field. For more information about idempotency, see\n Making retries safe with idempotent APIs.
\n \nPattern: ^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$
\n
Contains summary information about a flow.
\nYou can also create and update flows using the Amazon Connect\n Flow language.
" + "smithy.api#documentation": "Contains summary information about a flow.
\nYou can also create and update flows using the Amazon Connect\n Flow language.
" } }, "com.amazonaws.connect#ContactFlowSummaryList": { @@ -3917,7 +3917,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a flow for the specified Amazon Connect instance.
\nYou can also create and update flows using the Amazon Connect\n Flow language.
", + "smithy.api#documentation": "Creates a flow for the specified Amazon Connect instance.
\nYou can also create and update flows using the Amazon Connect\n Flow language.
", "smithy.api#http": { "method": "PUT", "uri": "/contact-flows/{InstanceId}", @@ -4753,7 +4753,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a rule for the specified Amazon Connect instance.
\nUse the Rules Function language to code conditions for the rule.\n
", + "smithy.api#documentation": "Creates a rule for the specified Amazon Connect instance.
", "smithy.api#http": { "method": "POST", "uri": "/rules/{InstanceId}", @@ -4930,7 +4930,7 @@ "TagRestrictedResources": { "target": "com.amazonaws.connect#TagRestrictedResourceList", "traits": { - "smithy.api#documentation": "The list of resources that a security profile applies tag restrictions to in Amazon Connect. Following are acceptable ResourceNames: User
| SecurityProfile
| Queue
| \n RoutingProfile
\n
The list of resources that a security profile applies tag restrictions to in Amazon Connect.
" } } } @@ -6377,7 +6377,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a traffic distribution group. This API can be called only in the Region where the traffic distribution group is\n created.
\nFor more information about deleting traffic distribution groups, see Delete traffic distribution groups in\n the Amazon Connect Administrator Guide.
", + "smithy.api#documentation": "Deletes a traffic distribution group. This API can be called only in the Region where the traffic distribution group is\n created.
\n\nFor more information about deleting traffic distribution groups, see Delete traffic distribution groups in\n the Amazon Connect Administrator Guide.
", "smithy.api#http": { "method": "DELETE", "uri": "/traffic-distribution-group/{TrafficDistributionGroupId}", @@ -6788,7 +6788,7 @@ } ], "traits": { - "smithy.api#documentation": "Describes the specified flow.
\nYou can also create and update flows using the Amazon Connect\n Flow language.
", + "smithy.api#documentation": "Describes the specified flow.
\nYou can also create and update flows using the Amazon Connect\n Flow language.
", "smithy.api#http": { "method": "GET", "uri": "/contact-flows/{InstanceId}/{ContactFlowId}", @@ -8922,7 +8922,7 @@ "CurrentMetrics": { "target": "com.amazonaws.connect#CurrentMetrics", "traits": { - "smithy.api#documentation": "The metrics to retrieve. Specify the name and unit for each metric. The following metrics\n are available. For a description of all the metrics, see Real-time Metrics\n Definitions in the Amazon Connect Administrator Guide.
\nUnit: COUNT
\nName in real-time metrics report: ACW\n
\nUnit: COUNT
\nName in real-time metrics report: Available\n
\nUnit: COUNT
\nName in real-time metrics report: Error\n
\nUnit: COUNT
\nName in real-time metrics report: NPT (Non-Productive Time)\n
\nUnit: COUNT
\nName in real-time metrics report: On\n contact\n
\nUnit: COUNT
\nName in real-time metrics report: On\n contact\n
\nUnit: COUNT
\nName in real-time metrics report: Online\n
\nUnit: COUNT
\nName in real-time metrics report: Staffed\n
\nUnit: COUNT
\nName in real-time metrics report: In\n queue\n
\nUnit: COUNT
\nName in real-time metrics report: Scheduled\n
\nUnit: SECONDS
\nWhen you use groupings, Unit says SECONDS and the Value is returned in SECONDS.
\nWhen you do not use groupings, Unit says SECONDS but the Value is returned in\n MILLISECONDS. For example, if you get a response like this:
\n\n { \"Metric\": { \"Name\": \"OLDEST_CONTACT_AGE\", \"Unit\": \"SECONDS\" }, \"Value\": 24113.0\n
}
The actual OLDEST_CONTACT_AGE is 24 seconds.
\nName in real-time metrics report: Oldest\n
\nUnit: COUNT
\nName in real-time metrics report: Active\n
\nUnit: COUNT
\nName in real-time metrics report: Availability\n
\nThe metrics to retrieve. Specify the name and unit for each metric. The following metrics\n are available. For a description of all the metrics, see Real-time Metrics\n Definitions in the Amazon Connect Administrator Guide.
\nUnit: COUNT
\nName in real-time metrics report: ACW\n
\n\nUnit: COUNT
\nName in real-time metrics report: Available\n
\nUnit: COUNT
\nName in real-time metrics report: Error\n
\nUnit: COUNT
\nName in real-time metrics report: NPT (Non-Productive Time)\n
\nUnit: COUNT
\nName in real-time metrics report: On\n contact\n
\nUnit: COUNT
\nName in real-time metrics report: On\n contact\n
\nUnit: COUNT
\nName in real-time metrics report: Online\n
\nUnit: COUNT
\nName in real-time metrics report: Staffed\n
\nUnit: COUNT
\nName in real-time metrics report: In\n queue\n
\nUnit: COUNT
\nName in real-time metrics report: Scheduled\n
\nUnit: SECONDS
\nWhen you use groupings, Unit says SECONDS and the Value is returned in SECONDS.
\nWhen you do not use groupings, Unit says SECONDS but the Value is returned in\n MILLISECONDS. For example, if you get a response like this:
\n\n { \"Metric\": { \"Name\": \"OLDEST_CONTACT_AGE\", \"Unit\": \"SECONDS\" }, \"Value\": 24113.0\n
}
The actual OLDEST_CONTACT_AGE is 24 seconds.
\n\nName in real-time metrics report: Oldest\n
\nUnit: COUNT
\nName in real-time metrics report: Active\n
\nUnit: COUNT
\nName in real-time metrics report: Availability\n
\nGets historical metric data from the specified Amazon Connect instance.
\nFor a description of each historical metric, see Historical Metrics\n Definitions in the Amazon Connect Administrator Guide.
", + "smithy.api#documentation": "Gets historical metric data from the specified Amazon Connect instance.
\n\nFor a description of each historical metric, see Historical Metrics\n Definitions in the Amazon Connect Administrator Guide.
", "smithy.api#http": { "method": "POST", "uri": "/metrics/historical/{InstanceId}", @@ -9206,13 +9206,13 @@ "Groupings": { "target": "com.amazonaws.connect#Groupings", "traits": { - "smithy.api#documentation": "The grouping applied to the metrics returned. For example, when results are grouped by\n queue, the metrics returned are grouped by queue. The values returned apply to the metrics for\n each queue rather than aggregated for all queues.
\nIf no grouping is specified, a summary of metrics for all queues is returned.
" + "smithy.api#documentation": "The grouping applied to the metrics returned. For example, when results are grouped by\n queue, the metrics returned are grouped by queue. The values returned apply to the metrics for\n each queue rather than aggregated for all queues.
\n\nIf no grouping is specified, a summary of metrics for all queues is returned.
" } }, "HistoricalMetrics": { "target": "com.amazonaws.connect#HistoricalMetrics", "traits": { - "smithy.api#documentation": "The metrics to retrieve. Specify the name, unit, and statistic for each metric. The\n following historical metrics are available. For a description of each metric, see Historical Metrics Definitions in the Amazon Connect Administrator\n Guide.
\nThis API does not support a contacts\n incoming metric (there's no CONTACTS_INCOMING metric missing from the documented list).
\nUnit: SECONDS
\nStatistic: AVG
\nUnit: SECONDS
\nStatistic: AVG
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: SECONDS
\nStatistic: AVG
\nUnit: SECONDS
\nStatistic: AVG
\nUnit: SECONDS
\nStatistic: AVG
\nUnit: SECONDS
\nStatistic: AVG
\nUnit: PERCENT
\nStatistic: AVG
\nUnit: SECONDS
\nStatistic: AVG
\nUnit: SECONDS
\nStatistic: MAX
\nYou can include up to 20 SERVICE_LEVEL metrics in a request.
\nUnit: PERCENT
\nStatistic: AVG
\nThreshold: For ThresholdValue
, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison
, you must enter LT
(for\n \"Less than\").
The metrics to retrieve. Specify the name, unit, and statistic for each metric. The\n following historical metrics are available. For a description of each metric, see Historical Metrics Definitions in the Amazon Connect Administrator\n Guide.
\nThis API does not support a contacts\n incoming metric (there's no CONTACTS_INCOMING metric missing from the documented list).
\nUnit: SECONDS
\nStatistic: AVG
\nUnit: SECONDS
\nStatistic: AVG
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: COUNT
\nStatistic: SUM
\nUnit: SECONDS
\nStatistic: AVG
\nUnit: SECONDS
\nStatistic: AVG
\nUnit: SECONDS
\nStatistic: AVG
\nUnit: SECONDS
\nStatistic: AVG
\nUnit: PERCENT
\nStatistic: AVG
\nUnit: SECONDS
\nStatistic: AVG
\nUnit: SECONDS
\nStatistic: MAX
\nYou can include up to 20 SERVICE_LEVEL metrics in a request.
\nUnit: PERCENT
\nStatistic: AVG
\nThreshold: For ThresholdValue
, enter any whole number from 1 to 604800\n (inclusive), in seconds. For Comparison
, you must enter LT
(for\n \"Less than\").
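A sketch of a GetMetricData call that applies the SERVICE_LEVEL threshold rule described above (ThresholdValue as a whole number of seconds between 1 and 604800, Comparison LT). It assumes the AWS SDK for Kotlin's generated Connect client and builder DSL; the specific generated names used here (ConnectClient, HistoricalMetricName.ServiceLevel, Statistic.Avg, Comparison.Lt, the Unit import alias) are assumptions and may differ from the actual codegen output:

// Hypothetical sketch; the generated Kotlin type and enum names below are assumed.
import aws.sdk.kotlin.services.connect.ConnectClient
import aws.sdk.kotlin.services.connect.model.Comparison
import aws.sdk.kotlin.services.connect.model.Filters
import aws.sdk.kotlin.services.connect.model.HistoricalMetric
import aws.sdk.kotlin.services.connect.model.HistoricalMetricName
import aws.sdk.kotlin.services.connect.model.Statistic
import aws.sdk.kotlin.services.connect.model.Threshold
import aws.sdk.kotlin.services.connect.model.Unit as MetricUnit
import aws.smithy.kotlin.runtime.time.Instant

suspend fun fetchServiceLevel(connectInstanceId: String, queueId: String) {
    ConnectClient.fromEnvironment().use { connect ->
        val response = connect.getMetricData {
            instanceId = connectInstanceId
            startTime = Instant.fromEpochSeconds(1_700_000_000)
            endTime = Instant.fromEpochSeconds(1_700_003_600)
            filters = Filters { queues = listOf(queueId) }
            historicalMetrics = listOf(
                HistoricalMetric {
                    name = HistoricalMetricName.ServiceLevel
                    unit = MetricUnit.Percent        // PERCENT, per the list above
                    statistic = Statistic.Avg        // AVG, per the list above
                    threshold = Threshold {
                        comparison = Comparison.Lt   // "Less than"
                        thresholdValue = 60.0        // whole number of seconds, 1 to 604800
                    }
                }
            )
        }
        response.metricResults?.forEach { println(it) }
    }
}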
Provides information about the flows for the specified Amazon Connect instance.
\nYou can also create and update flows using the Amazon Connect\n Flow language.
\nFor more information about flows, see Flows in the\n Amazon Connect Administrator Guide.
", + "smithy.api#documentation": "Provides information about the flows for the specified Amazon Connect instance.
\nYou can also create and update flows using the Amazon Connect\n Flow language.
\nFor more information about flows, see Flows in the\n Amazon Connect Administrator Guide.
", "smithy.api#http": { "method": "GET", "uri": "/contact-flows-summary/{InstanceId}", @@ -11929,7 +11929,7 @@ } ], "traits": { - "smithy.api#documentation": "This API is in preview release for Amazon Connect and is subject to change.
\nReturns a paginated list of storage configs for the identified instance and resource\n type.
", + "smithy.api#documentation": "This API is in preview release for Amazon Connect and is subject to change.
\n\nReturns a paginated list of storage configs for the identified instance and resource\n type.
", "smithy.api#http": { "method": "GET", "uri": "/instance/{InstanceId}/storage-configs", @@ -12349,7 +12349,7 @@ } ], "traits": { - "smithy.api#documentation": "Provides information about the phone numbers for the specified Amazon Connect instance.
\nFor more information about phone numbers, see Set Up Phone Numbers for Your\n Contact Center in the Amazon Connect Administrator\n Guide.
\nThe phone number Arn
value that is returned from each of the items in the\n PhoneNumberSummaryList cannot be used to tag phone number resources. It will fail with\n a ResourceNotFoundException
. Instead, use the ListPhoneNumbersV2 API.\n It returns the new phone number ARN that can be used to tag phone number resources.
Provides information about the phone numbers for the specified Amazon Connect instance.
\nFor more information about phone numbers, see Set Up Phone Numbers for Your\n Contact Center in the Amazon Connect Administrator\n Guide.
\n\nThe phone number Arn
value that is returned from each of the items in the\n PhoneNumberSummaryList cannot be used to tag phone number resources. It will fail with\n a ResourceNotFoundException
. Instead, use the ListPhoneNumbersV2 API.\n It returns the new phone number ARN that can be used to tag phone number resources.
When a contact is being recorded, and the recording has been suspended using\n SuspendContactRecording, this API resumes recording the call.
\nOnly voice recordings are supported at this time.
", + "smithy.api#documentation": "When a contact is being recorded, and the recording has been suspended using\n SuspendContactRecording, this API resumes recording the call.
\n\nOnly voice recordings are supported at this time.
", "smithy.api#http": { "method": "POST", "uri": "/contact/resume-recording", @@ -17612,7 +17612,7 @@ } }, "traits": { - "smithy.api#documentation": "The search criteria to be used to return routing profiles.
\nThe name
and description
fields support \"contains\" queries with\n a minimum of 2 characters and a maximum of 25 characters. Any queries with character lengths\n outside of this range will throw invalid results.
The search criteria to be used to return routing profiles.
\nThe name
and description
fields support \"contains\" queries with\n a minimum of 2 characters and a maximum of 25 characters. Any queries with character lengths\n outside of this range will throw invalid results.
The search criteria to be used to return security profiles.
\nThe name
field support \"contains\" queries with a minimum of 2 characters and\n maximum of 25 characters. Any queries with character lengths outside of this range will throw\n invalid results.
The currently supported value for FieldName
: name
\n
The search criteria to be used to return security profiles.
\nThe name
field support \"contains\" queries with a minimum of 2 characters and\n maximum of 25 characters. Any queries with character lengths outside of this range will throw\n invalid results.
The currently supported value for FieldName
: name
\n
Searches users in an Amazon Connect instance, with optional filtering.
\n\n AfterContactWorkTimeLimit
is returned in milliseconds.
Searches users in an Amazon Connect instance, with optional filtering.
\n \n\n AfterContactWorkTimeLimit
is returned in milliseconds.
Initiates a flow to start a new chat for the customer. Response of this API provides a token\n required to obtain credentials from the CreateParticipantConnection API in the Amazon Connect Participant Service.
\nWhen a new chat contact is successfully created, clients must subscribe to the participant’s\n connection for the created chat within 5 minutes. This is achieved by invoking CreateParticipantConnection with WEBSOCKET and CONNECTION_CREDENTIALS.
\nA 429 error occurs in the following situations:
\nAPI rate limit is exceeded. API TPS throttling returns a TooManyRequests
\n exception.
The quota for concurrent active\n chats is exceeded. Active chat throttling returns a\n LimitExceededException
.
If you use the ChatDurationInMinutes
parameter and receive a 400 error, your\n account may not support the ability to configure custom chat durations. For more information,\n contact Amazon Web Services Support.
For more information about chat, see Chat in the Amazon Connect\n Administrator Guide.
", + "smithy.api#documentation": "Initiates a flow to start a new chat for the customer. Response of this API provides a token\n required to obtain credentials from the CreateParticipantConnection API in the Amazon Connect Participant Service.
\n\nWhen a new chat contact is successfully created, clients must subscribe to the participant’s\n connection for the created chat within 5 minutes. This is achieved by invoking CreateParticipantConnection with WEBSOCKET and CONNECTION_CREDENTIALS.
\n\nA 429 error occurs in the following situations:
\nAPI rate limit is exceeded. API TPS throttling returns a TooManyRequests
\n exception.
The quota for concurrent active\n chats is exceeded. Active chat throttling returns a\n LimitExceededException
.
If you use the ChatDurationInMinutes
parameter and receive a 400 error, your\n account may not support the ability to configure custom chat durations. For more information,\n contact Amazon Web Services Support.
For more information about chat, see Chat in the Amazon Connect\n Administrator Guide.
", "smithy.api#http": { "method": "PUT", "uri": "/contact/chat", @@ -19047,7 +19047,7 @@ } ], "traits": { - "smithy.api#documentation": "Starts recording the contact:
\nIf the API is called before the agent joins the call, recording\n starts when the agent joins the call.
\nIf the API is called after the agent joins the call, recording starts\n at the time of the API call.
\nStartContactRecording is a one-time action. For example, if you use StopContactRecording to\n stop recording an ongoing call, you can't use StartContactRecording to restart it. For scenarios\n where the recording has started and you want to suspend and resume it, such as when collecting\n sensitive information (for example, a credit card number), use SuspendContactRecording and\n ResumeContactRecording.
\nYou can use this API to override the recording behavior configured in the Set recording\n behavior block.
\nOnly voice recordings are supported at this time.
", + "smithy.api#documentation": "Starts recording the contact:
\nIf the API is called before the agent joins the call, recording\n starts when the agent joins the call.
\nIf the API is called after the agent joins the call, recording starts\n at the time of the API call.
\nStartContactRecording is a one-time action. For example, if you use StopContactRecording to\n stop recording an ongoing call, you can't use StartContactRecording to restart it. For scenarios\n where the recording has started and you want to suspend and resume it, such as when collecting\n sensitive information (for example, a credit card number), use SuspendContactRecording and\n ResumeContactRecording.
\nYou can use this API to override the recording behavior configured in the Set recording\n behavior block.
\nOnly voice recordings are supported at this time.
", "smithy.api#http": { "method": "POST", "uri": "/contact/start-recording", @@ -19204,7 +19204,7 @@ } ], "traits": { - "smithy.api#documentation": "Places an outbound call to a contact, and then initiates the flow. It performs the actions\n in the flow that's specified (in ContactFlowId
).
Agents do not initiate the outbound API, which means that they do not dial the contact. If\n the flow places an outbound call to a contact, and then puts the contact in queue, the call is\n then routed to the agent, like any other inbound case.
\nThere is a 60-second dialing timeout for this operation. If the call is not connected after\n 60 seconds, it fails.
\nUK numbers with a 447 prefix are not allowed by default. Before you can dial these UK\n mobile numbers, you must submit a service quota increase request. For more information, see\n Amazon Connect Service Quotas in the Amazon Connect Administrator\n Guide.
\nCampaign calls are not allowed by default. Before you can make a call with\n TrafficType
= CAMPAIGN
, you must submit a service quota increase\n request to the quota Amazon Connect campaigns.
Places an outbound call to a contact, and then initiates the flow. It performs the actions\n in the flow that's specified (in ContactFlowId
).
Agents do not initiate the outbound API, which means that they do not dial the contact. If\n the flow places an outbound call to a contact, and then puts the contact in queue, the call is\n then routed to the agent, like any other inbound case.
\n\nThere is a 60-second dialing timeout for this operation. If the call is not connected after\n 60 seconds, it fails.
\nUK numbers with a 447 prefix are not allowed by default. Before you can dial these UK\n mobile numbers, you must submit a service quota increase request. For more information, see\n Amazon Connect Service Quotas in the Amazon Connect Administrator\n Guide.
\nCampaign calls are not allowed by default. Before you can make a call with\n TrafficType
= CAMPAIGN
, you must submit a service quota increase\n request to the quota Amazon Connect campaigns.
Ends the specified contact. This call does not work for the following initiation\n methods:
\nDISCONNECT
\nTRANSFER
\nQUEUE_TRANSFER
\nEnds the specified contact. This call does not work for the following initiation\n methods:
\n \nDISCONNECT
\nTRANSFER
\nQUEUE_TRANSFER
\nStops recording a call when a contact is being recorded. StopContactRecording is a one-time\n action. If you use StopContactRecording to stop recording an ongoing call, you can't use\n StartContactRecording to restart it. For scenarios where the recording has started and you want\n to suspend it for sensitive information (for example, to collect a credit card number), and then\n restart it, use SuspendContactRecording and ResumeContactRecording.
\nOnly voice recordings are supported at this time.
", + "smithy.api#documentation": "Stops recording a call when a contact is being recorded. StopContactRecording is a one-time\n action. If you use StopContactRecording to stop recording an ongoing call, you can't use\n StartContactRecording to restart it. For scenarios where the recording has started and you want\n to suspend it for sensitive information (for example, to collect a credit card number), and then\n restart it, use SuspendContactRecording and ResumeContactRecording.
\n\nOnly voice recordings are supported at this time.
", "smithy.api#http": { "method": "POST", "uri": "/contact/stop-recording", @@ -19700,7 +19700,7 @@ } }, "traits": { - "smithy.api#documentation": "A leaf node condition which can be used to specify a string condition.
\nThe currently supported value for FieldName
: name
\n
A leaf node condition which can be used to specify a string condition.
\nThe currently supported value for FieldName
: name
\n
Updates the specified flow.
\nYou can also create and update flows using the Amazon Connect\n Flow language.
", + "smithy.api#documentation": "Updates the specified flow.
\nYou can also create and update flows using the Amazon Connect\n Flow language.
", "smithy.api#http": { "method": "POST", "uri": "/contact-flows/{InstanceId}/{ContactFlowId}/content", @@ -21032,7 +21032,7 @@ "Content": { "target": "com.amazonaws.connect#ContactFlowContent", "traits": { - "smithy.api#documentation": "The JSON string that represents flow's content. For an example, see Example contact\n flow in Amazon Connect Flow language.
", + "smithy.api#documentation": "The JSON string that represents flow's content. For an example, see Example contact\n flow in Amazon Connect Flow language in the Amazon Connect\n Administrator Guide.
", "smithy.api#required": {} } } @@ -21295,7 +21295,7 @@ } ], "traits": { - "smithy.api#documentation": "The name of the flow.
\nYou can also create and update flows using the Amazon Connect\n Flow language.
", + "smithy.api#documentation": "The name of the flow.
\nYou can also create and update flows using the Amazon Connect\n Flow language.
", "smithy.api#http": { "method": "POST", "uri": "/contact-flows/{InstanceId}/{ContactFlowId}/name", @@ -22493,7 +22493,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates a rule for the specified Amazon Connect instance.
\nUse the Rules Function language to code conditions for the rule.\n
", + "smithy.api#documentation": "Updates a rule for the specified Amazon Connect instance.
", "smithy.api#http": { "method": "PUT", "uri": "/rules/{InstanceId}/{RuleId}", @@ -22835,7 +22835,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates the traffic distribution for a given traffic distribution group.
\nFor more information about updating a traffic distribution group, see Update telephony\n traffic distribution across Amazon Web Services Regions\n in the Amazon Connect Administrator Guide.
", + "smithy.api#documentation": "Updates the traffic distribution for a given traffic distribution group.
\n\nFor more information about updating a traffic distribution group, see Update telephony\n traffic distribution across Amazon Web Services Regions\n in the Amazon Connect Administrator Guide.
", "smithy.api#http": { "method": "PUT", "uri": "/traffic-distribution/{Id}", @@ -23629,7 +23629,7 @@ "target": "com.amazonaws.connect#AfterContactWorkTimeLimit", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "The After Call Work (ACW) timeout setting, in seconds.
\nWhen returned by a SearchUsers
call, AfterContactWorkTimeLimit
is\n returned in milliseconds.
The After Call Work (ACW) timeout setting, in seconds.
\n \nWhen returned by a SearchUsers
call, AfterContactWorkTimeLimit
is\n returned in milliseconds.
Amazon Connect is a cloud-based contact center solution that makes it easy to set up and manage\n a customer contact center and provide reliable customer engagement at any scale.
\nAmazon Connect enables customer contacts through voice or chat.
\nThe APIs described here are used by chat participants, such as agents and\n customers.
", + "smithy.api#documentation": "Amazon Connect is a cloud-based contact center solution that makes it easy to set up and manage\n a customer contact center and provide reliable customer engagement at any scale.
\nAmazon Connect enables customer contacts through voice or chat.
\n\nThe APIs described here are used by chat participants, such as agents and\n customers.
", "smithy.api#title": "Amazon Connect Participant Service", "smithy.rules#endpointRuleSet": { "version": "1.0", "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -142,6 +142,15 @@ "ref": "Endpoint" } ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" } ], "type": "tree", @@ -398,9 +407,9 @@ } }, "params": { - "UseFIPS": true, "Region": "ap-south-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -411,9 +420,9 @@ } }, "params": { - "UseFIPS": true, "Region": "ap-south-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -424,9 +433,9 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-south-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -437,9 +446,9 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-south-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -450,9 +459,9 @@ } }, "params": { - "UseFIPS": true, "Region": "ca-central-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -463,9 +472,9 @@ } }, "params": { - "UseFIPS": true, "Region": "ca-central-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -476,9 +485,9 @@ } }, "params": { - "UseFIPS": false, "Region": "ca-central-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -489,9 +498,9 @@ } }, "params": { - "UseFIPS": false, "Region": "ca-central-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -502,9 +511,9 @@ } }, "params": { - "UseFIPS": true, "Region": "eu-central-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -515,9 +524,9 @@ } }, "params": { - "UseFIPS": true, "Region": "eu-central-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -528,9 +537,9 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-central-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -541,9 +550,9 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-central-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -554,9 +563,9 @@ } }, "params": { - "UseFIPS": true, "Region": "us-west-2", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -567,9 +576,9 @@ } }, "params": { - "UseFIPS": true, "Region": "us-west-2", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -580,9 +589,9 @@ } }, "params": { - "UseFIPS": false, "Region": "us-west-2", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -593,9 +602,9 @@ } }, "params": { - "UseFIPS": false, "Region": "us-west-2", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -606,9 +615,9 @@ } }, "params": { - "UseFIPS": true, "Region": "af-south-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -619,9 +628,9 @@ } }, "params": { - "UseFIPS": true, "Region": "af-south-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -632,9 +641,9 @@ } }, "params": { - "UseFIPS": false, "Region": "af-south-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -645,9 +654,9 @@ } }, "params": { - "UseFIPS": false, "Region": "af-south-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -658,9 
+667,9 @@ } }, "params": { - "UseFIPS": true, "Region": "eu-west-2", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -671,9 +680,9 @@ } }, "params": { - "UseFIPS": true, "Region": "eu-west-2", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -684,9 +693,9 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-west-2", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -697,9 +706,9 @@ } }, "params": { - "UseFIPS": false, "Region": "eu-west-2", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -710,9 +719,9 @@ } }, "params": { - "UseFIPS": true, "Region": "ap-northeast-2", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -723,9 +732,9 @@ } }, "params": { - "UseFIPS": true, "Region": "ap-northeast-2", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -736,9 +745,9 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-northeast-2", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -749,9 +758,9 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-northeast-2", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -762,9 +771,9 @@ } }, "params": { - "UseFIPS": true, "Region": "ap-northeast-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -775,9 +784,9 @@ } }, "params": { - "UseFIPS": true, "Region": "ap-northeast-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -788,9 +797,9 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-northeast-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -801,9 +810,9 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-northeast-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -814,9 +823,9 @@ } }, "params": { - "UseFIPS": true, "Region": "us-gov-west-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -827,9 +836,9 @@ } }, "params": { - "UseFIPS": true, "Region": "us-gov-west-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -840,9 +849,9 @@ } }, "params": { - "UseFIPS": false, "Region": "us-gov-west-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -853,9 +862,9 @@ } }, "params": { - "UseFIPS": false, "Region": "us-gov-west-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -866,9 +875,9 @@ } }, "params": { - "UseFIPS": true, "Region": "ap-southeast-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -879,9 +888,9 @@ } }, "params": { - "UseFIPS": true, "Region": "ap-southeast-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -892,9 +901,9 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-southeast-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -905,9 +914,9 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-southeast-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -918,9 +927,9 @@ } }, "params": { - "UseFIPS": true, "Region": "ap-southeast-2", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -931,9 +940,9 @@ } }, "params": { - "UseFIPS": true, "Region": "ap-southeast-2", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -944,9 +953,9 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-southeast-2", - "UseDualStack": true + 
"UseDualStack": true, + "UseFIPS": false } }, { @@ -957,9 +966,9 @@ } }, "params": { - "UseFIPS": false, "Region": "ap-southeast-2", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -970,9 +979,9 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -983,9 +992,9 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -996,9 +1005,9 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -1009,9 +1018,9 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -1022,9 +1031,9 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", "UseDualStack": false, + "UseFIPS": false, "Endpoint": "https://example.com" } }, @@ -1034,9 +1043,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", "UseDualStack": false, + "UseFIPS": true, "Endpoint": "https://example.com" } }, @@ -1046,9 +1055,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", "UseDualStack": true, + "UseFIPS": false, "Endpoint": "https://example.com" } } @@ -1067,26 +1076,22 @@ } }, "com.amazonaws.connectparticipant#ArtifactStatus": { - "type": "enum", - "members": { - "APPROVED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "APPROVED" - } - }, - "REJECTED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "REJECTED" - } - }, - "IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "IN_PROGRESS" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "APPROVED", + "name": "APPROVED" + }, + { + "value": "REJECTED", + "name": "REJECTED" + }, + { + "value": "IN_PROGRESS", + "name": "IN_PROGRESS" } - } + ] } }, "com.amazonaws.connectparticipant#AttachmentIdList": { @@ -1165,7 +1170,7 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 16384 + "max": 1024 } } }, @@ -1188,68 +1193,50 @@ } }, "com.amazonaws.connectparticipant#ChatItemType": { - "type": "enum", - "members": { - "TYPING": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TYPING" - } - }, - "PARTICIPANT_JOINED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PARTICIPANT_JOINED" - } - }, - "PARTICIPANT_LEFT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PARTICIPANT_LEFT" - } - }, - "CHAT_ENDED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CHAT_ENDED" - } - }, - "TRANSFER_SUCCEEDED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TRANSFER_SUCCEEDED" - } - }, - "TRANSFER_FAILED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TRANSFER_FAILED" - } - }, - "MESSAGE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MESSAGE" - } - }, - "EVENT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "EVENT" - } - }, - "ATTACHMENT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ATTACHMENT" - } - }, - "CONNECTION_ACK": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CONNECTION_ACK" + "type": 
"string", + "traits": { + "smithy.api#enum": [ + { + "value": "TYPING", + "name": "TYPING" + }, + { + "value": "PARTICIPANT_JOINED", + "name": "PARTICIPANT_JOINED" + }, + { + "value": "PARTICIPANT_LEFT", + "name": "PARTICIPANT_LEFT" + }, + { + "value": "CHAT_ENDED", + "name": "CHAT_ENDED" + }, + { + "value": "TRANSFER_SUCCEEDED", + "name": "TRANSFER_SUCCEEDED" + }, + { + "value": "TRANSFER_FAILED", + "name": "TRANSFER_FAILED" + }, + { + "value": "MESSAGE", + "name": "MESSAGE" + }, + { + "value": "EVENT", + "name": "EVENT" + }, + { + "value": "ATTACHMENT", + "name": "ATTACHMENT" + }, + { + "value": "CONNECTION_ACK", + "name": "CONNECTION_ACK" } - } + ] } }, "com.amazonaws.connectparticipant#ClientToken": { @@ -1290,7 +1277,7 @@ } ], "traits": { - "smithy.api#documentation": "Allows you to confirm that the attachment has been uploaded using the pre-signed URL\n provided in StartAttachmentUpload API.
\nThe Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.
", + "smithy.api#documentation": "Allows you to confirm that the attachment has been uploaded using the pre-signed URL\n provided in StartAttachmentUpload API.
\nThe Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.
", "smithy.api#http": { "method": "POST", "uri": "/participant/complete-attachment-upload", @@ -1358,7 +1345,7 @@ "Expiry": { "target": "com.amazonaws.connectparticipant#ISO8601Datetime", "traits": { - "smithy.api#documentation": "The expiration of the token.
\nIt's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example,\n 2019-11-08T02:41:28.172Z.
" + "smithy.api#documentation": "The expiration of the token.
\nIt's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example,\n 2019-11-08T02:41:28.172Z.
" } } }, @@ -1367,20 +1354,18 @@ } }, "com.amazonaws.connectparticipant#ConnectionType": { - "type": "enum", - "members": { - "WEBSOCKET": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "WEBSOCKET" - } - }, - "CONNECTION_CREDENTIALS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CONNECTION_CREDENTIALS" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "WEBSOCKET", + "name": "WEBSOCKET" + }, + { + "value": "CONNECTION_CREDENTIALS", + "name": "CONNECTION_CREDENTIALS" } - } + ] } }, "com.amazonaws.connectparticipant#ConnectionTypeList": { @@ -1435,7 +1420,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates the participant's connection. Note that ParticipantToken is used for invoking this API instead of\n ConnectionToken.
\nThe participant token is valid for the lifetime of the participant –\n until they are part of a contact.
\nThe response URL for WEBSOCKET
Type has a connect expiry timeout of 100s.\n Clients must manually connect to the returned websocket URL and subscribe to the desired\n topic.
For chat, you need to publish the following on the established websocket\n connection:
\n\n {\"topic\":\"aws/subscribe\",\"content\":{\"topics\":[\"aws/chat\"]}}
\n
Upon websocket URL expiry, as specified in the response ConnectionExpiry parameter,\n clients need to call this API again to obtain a new websocket URL and perform the same\n steps as before.
\n\n Message streaming support: This API can also be used together with the\n StartContactStreaming\n API to create a participant connection for chat contacts that are\n not using a websocket. For more information about message streaming, Enable real-time chat message streaming in the Amazon Connect\n Administrator Guide.
\n\n Feature specifications: For information about feature specifications, such as the allowed number of open\n websocket connections per participant, see Feature specifications in the Amazon Connect Administrator\n Guide.
\nThe Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.
\nCreates the participant's connection. Note that ParticipantToken is used for invoking this API instead of\n ConnectionToken.
\nThe participant token is valid for the lifetime of the participant –\n until they are part of a contact.
\nThe response URL for WEBSOCKET
Type has a connect expiry timeout of 100s.\n Clients must manually connect to the returned websocket URL and subscribe to the desired\n topic.
For chat, you need to publish the following on the established websocket\n connection:
\n\n {\"topic\":\"aws/subscribe\",\"content\":{\"topics\":[\"aws/chat\"]}}
\n
Upon websocket URL expiry, as specified in the response ConnectionExpiry parameter,\n clients need to call this API again to obtain a new websocket URL and perform the same\n steps as before.
\n\n Message streaming support: This API can also be used together with the\n StartContactStreaming\n API to create a participant connection for chat contacts that are\n not using a websocket. For more information about message streaming, Enable real-time chat message streaming in the Amazon Connect\n Administrator Guide.
\n\n Feature specifications: For information about feature specifications, such as the allowed number of open\n websocket connections per participant, see Feature specifications in the Amazon Connect Administrator\n Guide.
\nThe Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.
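A sketch of the client-side steps described above for CreateParticipantConnection: connect to the returned WEBSOCKET URL before its 100-second expiry and publish the aws/subscribe frame quoted above. Only the JDK 11 WebSocket client is used; websocketUrl stands in for the URL field returned in the response's websocket connection details:

// Sketch using the JDK 11 WebSocket client; websocketUrl is a placeholder for the
// URL returned by CreateParticipantConnection (ConnectionType WEBSOCKET).
import java.net.URI
import java.net.http.HttpClient
import java.net.http.WebSocket
import java.util.concurrent.CompletionStage

fun subscribeToChatTopic(websocketUrl: String) {
    val listener = object : WebSocket.Listener {
        override fun onText(ws: WebSocket, data: CharSequence, last: Boolean): CompletionStage<*>? {
            println("received: $data")   // chat events arrive on the aws/chat topic
            return super.onText(ws, data, last)
        }
    }
    val webSocket = HttpClient.newHttpClient()
        .newWebSocketBuilder()
        .buildAsync(URI.create(websocketUrl), listener)
        .join()
    // The subscribe frame is exactly the payload quoted in the documentation above.
    webSocket.sendText("""{"topic":"aws/subscribe","content":{"topics":["aws/chat"]}}""", true)
}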
\nThis is a header parameter.
\nThe ParticipantToken as obtained from StartChatContact\n API response.
", + "smithy.api#documentation": "This is a header parameter.
\nThe ParticipantToken as obtained from StartChatContact\n API response.
", "smithy.api#httpHeader": "X-Amz-Bearer", "smithy.api#required": {} } @@ -1509,7 +1494,7 @@ } ], "traits": { - "smithy.api#documentation": "Disconnects a participant. Note that ConnectionToken is used for invoking this API\n instead of ParticipantToken.
\nThe Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.
", + "smithy.api#documentation": "Disconnects a participant. Note that ConnectionToken is used for invoking this API\n instead of ParticipantToken.
\nThe Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.
", "smithy.api#http": { "method": "POST", "uri": "/participant/disconnect", @@ -1573,7 +1558,7 @@ } ], "traits": { - "smithy.api#documentation": "Provides a pre-signed URL for download of a completed attachment. This is an\n asynchronous API for use with active contacts.
\nThe Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.
", + "smithy.api#documentation": "Provides a pre-signed URL for download of a completed attachment. This is an\n asynchronous API for use with active contacts.
\nThe Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.
", "smithy.api#http": { "method": "POST", "uri": "/participant/attachment", @@ -1641,7 +1626,7 @@ } ], "traits": { - "smithy.api#documentation": "Retrieves a transcript of the session, including details about any attachments. Note\n that ConnectionToken is used for invoking this API instead of ParticipantToken.
\nThe Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.
", + "smithy.api#documentation": "Retrieves a transcript of the session, including details about any attachments. Note\n that ConnectionToken is used for invoking this API instead of ParticipantToken.
\nThe Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.
", "smithy.api#http": { "method": "POST", "uri": "/participant/transcript", @@ -1761,7 +1746,7 @@ "AbsoluteTime": { "target": "com.amazonaws.connectparticipant#Instant", "traits": { - "smithy.api#documentation": "The time when the message or event was sent.
\nIt's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example,\n 2019-11-08T02:41:28.172Z.
" + "smithy.api#documentation": "The time when the message or event was sent.
\nIt's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example,\n 2019-11-08T02:41:28.172Z.
" } }, "Content": { @@ -1868,26 +1853,22 @@ } }, "com.amazonaws.connectparticipant#ParticipantRole": { - "type": "enum", - "members": { - "AGENT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AGENT" - } - }, - "CUSTOMER": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CUSTOMER" - } - }, - "SYSTEM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SYSTEM" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "AGENT", + "name": "AGENT" + }, + { + "value": "CUSTOMER", + "name": "CUSTOMER" + }, + { + "value": "SYSTEM", + "name": "SYSTEM" } - } + ] } }, "com.amazonaws.connectparticipant#ParticipantToken": { @@ -1927,20 +1908,18 @@ } }, "com.amazonaws.connectparticipant#ScanDirection": { - "type": "enum", - "members": { - "FORWARD": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "FORWARD" - } - }, - "BACKWARD": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BACKWARD" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "FORWARD", + "name": "FORWARD" + }, + { + "value": "BACKWARD", + "name": "BACKWARD" } - } + ] } }, "com.amazonaws.connectparticipant#SendEvent": { @@ -1966,7 +1945,7 @@ } ], "traits": { - "smithy.api#documentation": "Sends an event. Note that ConnectionToken is used for invoking this API instead of\n ParticipantToken.
\nThe Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.
", + "smithy.api#documentation": "Sends an event. Note that ConnectionToken is used for invoking this API instead of\n ParticipantToken.
\nThe Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.
", "smithy.api#http": { "method": "POST", "uri": "/participant/event", @@ -1980,7 +1959,7 @@ "ContentType": { "target": "com.amazonaws.connectparticipant#ChatContentType", "traits": { - "smithy.api#documentation": "The content type of the request. Supported types are:
\napplication/vnd.amazonaws.connect.event.typing
\napplication/vnd.amazonaws.connect.event.connection.acknowledged
\nThe content type of the request. Supported types are:
\n\napplication/vnd.amazonaws.connect.event.typing
\napplication/vnd.amazonaws.connect.event.connection.acknowledged
\nThe time when the event was sent.
\nIt's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example,\n 2019-11-08T02:41:28.172Z.
" + "smithy.api#documentation": "The time when the event was sent.
\nIt's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example,\n 2019-11-08T02:41:28.172Z.
" } } } @@ -2047,7 +2026,7 @@ } ], "traits": { - "smithy.api#documentation": "Sends a message. Note that ConnectionToken is used for invoking this API instead of\n ParticipantToken.
\nThe Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.
", + "smithy.api#documentation": "Sends a message. Note that ConnectionToken is used for invoking this API instead of\n ParticipantToken.
\nThe Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.
", "smithy.api#http": { "method": "POST", "uri": "/participant/message", @@ -2101,7 +2080,7 @@ "AbsoluteTime": { "target": "com.amazonaws.connectparticipant#Instant", "traits": { - "smithy.api#documentation": "The time when the message was sent.
\nIt's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example,\n 2019-11-08T02:41:28.172Z.
" + "smithy.api#documentation": "The time when the message was sent.
\nIt's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example,\n 2019-11-08T02:41:28.172Z.
" } } } @@ -2123,20 +2102,18 @@ } }, "com.amazonaws.connectparticipant#SortKey": { - "type": "enum", - "members": { - "DESCENDING": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DESCENDING" - } - }, - "ASCENDING": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ASCENDING" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "DESCENDING", + "name": "DESCENDING" + }, + { + "value": "ASCENDING", + "name": "ASCENDING" } - } + ] } }, "com.amazonaws.connectparticipant#StartAttachmentUpload": { @@ -2165,7 +2142,7 @@ } ], "traits": { - "smithy.api#documentation": "Provides a pre-signed Amazon S3 URL in response for uploading the file directly to\n S3.
\nThe Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.
", + "smithy.api#documentation": "Provides a pre-signed Amazon S3 URL in response for uploading the file directly to\n S3.
\nThe Amazon Connect Participant Service APIs do not use Signature Version 4\n authentication.
", "smithy.api#http": { "method": "POST", "uri": "/participant/start-attachment-upload", @@ -2245,7 +2222,7 @@ "AbsoluteTime": { "target": "com.amazonaws.connectparticipant#Instant", "traits": { - "smithy.api#documentation": "The time in ISO format where to start.
\nIt's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example,\n 2019-11-08T02:41:28.172Z.
" + "smithy.api#documentation": "The time in ISO format where to start.
\nIt's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example,\n 2019-11-08T02:41:28.172Z.
" } }, "MostRecent": { @@ -2372,7 +2349,7 @@ "ConnectionExpiry": { "target": "com.amazonaws.connectparticipant#ISO8601Datetime", "traits": { - "smithy.api#documentation": "The URL expiration timestamp in ISO date format.
\nIt's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example,\n 2019-11-08T02:41:28.172Z.
" + "smithy.api#documentation": "The URL expiration timestamp in ISO date format.
\nIt's specified in ISO 8601 format: yyyy-MM-ddThh:mm:ss.SSSZ. For example,\n 2019-11-08T02:41:28.172Z.
" } } }, diff --git a/codegen/sdk/aws-models/dataexchange.json b/codegen/sdk/aws-models/dataexchange.json index 1ed2b422dc6..c81bffcaa3e 100644 --- a/codegen/sdk/aws-models/dataexchange.json +++ b/codegen/sdk/aws-models/dataexchange.json @@ -6390,4 +6390,4 @@ } } } -} +} \ No newline at end of file diff --git a/codegen/sdk/aws-models/datasync.json b/codegen/sdk/aws-models/datasync.json index 7e14817d40f..b8f17531ba1 100644 --- a/codegen/sdk/aws-models/datasync.json +++ b/codegen/sdk/aws-models/datasync.json @@ -191,7 +191,7 @@ } ], "traits": { - "smithy.api#documentation": "Activates an DataSync agent that you have deployed in your storage\n environment. The activation process associates your agent with your account. In the activation\n process, you specify information such as the Amazon Web Services Region that you want to\n activate the agent in. You activate the agent in the Amazon Web Services Region where your\n target locations (in Amazon S3 or Amazon EFS) reside. Your tasks are created in this Amazon Web Services Region.
\nYou can activate the agent in a VPC (virtual private cloud) or provide the agent access to\n a VPC endpoint so you can run tasks without going over the public internet.
\nYou can use an agent for more than one location. If a task uses multiple agents, all of\n them need to have status AVAILABLE for the task to run. If you use multiple agents for a\n source location, the status of all the agents must be AVAILABLE for the task to run.
\nAgents are automatically updated by Amazon Web Services on a regular basis, using a mechanism that\n ensures minimal interruption to your tasks.
" + "smithy.api#documentation": "Activates an DataSync agent that you have deployed in your storage\n environment. The activation process associates your agent with your account. In the activation\n process, you specify information such as the Amazon Web Services Region that you want to\n activate the agent in. You activate the agent in the Amazon Web Services Region where your\n target locations (in Amazon S3 or Amazon EFS) reside. Your tasks are created in this Amazon Web Services Region.
\nYou can activate the agent in a VPC (virtual private cloud) or provide the agent access to\n a VPC endpoint so you can run tasks without going over the public internet.
\nYou can use an agent for more than one location. If a task uses multiple agents, all of\n them need to have status AVAILABLE for the task to run. If you use multiple agents for a\n source location, the status of all the agents must be AVAILABLE for the task to run.
\n \n\nAgents are automatically updated by Amazon Web Services on a regular basis, using a mechanism that\n ensures minimal interruption to your tasks.
" } }, "com.amazonaws.datasync#CreateAgentRequest": { @@ -200,7 +200,7 @@ "ActivationKey": { "target": "com.amazonaws.datasync#ActivationKey", "traits": { - "smithy.api#documentation": "Your agent activation key. You can get the activation key either by sending an HTTP GET\n request with redirects that enable you to get the agent IP address (port 80). Alternatively,\n you can get it from the DataSync console.
\nThe redirect URL returned in the response provides you the activation key for your\n agent in the query string parameter activationKey
. It might also include other\n activation-related parameters; however, these are merely defaults. The arguments you pass to\n this API call determine the actual configuration of your agent.
For more information, see Activating an Agent in the DataSync\n User Guide.\n
", + "smithy.api#documentation": "Your agent activation key. You can get the activation key either by sending an HTTP GET\n request with redirects that enable you to get the agent IP address (port 80). Alternatively,\n you can get it from the DataSync console.
\nThe redirect URL returned in the response provides you the activation key for your\n agent in the query string parameter activationKey
. It might also include other\n activation-related parameters; however, these are merely defaults. The arguments you pass to\n this API call determine the actual configuration of your agent.
For more information, see Activating an Agent in the DataSync\n User Guide.\n
", "smithy.api#required": {} } }, @@ -763,7 +763,7 @@ "Subdirectory": { "target": "com.amazonaws.datasync#NfsSubdirectory", "traits": { - "smithy.api#documentation": "The subdirectory in the NFS file system that is used to read data from the NFS source\n location or write data to the NFS destination. The NFS path should be a path that's\n exported by the NFS server, or a subdirectory of that path. The path should be such that it\n can be mounted by other NFS clients in your network.
\nTo see all the paths exported by your NFS server, run \"showmount -e\n nfs-server-name
\" from an NFS client that has access to your server. You can specify\n any directory that appears in the results, and any subdirectory of that directory. Ensure that\n the NFS export is accessible without Kerberos authentication.
To transfer all the data in the folder you specified, DataSync needs to have\n permissions to read all the data. To ensure this, either configure the NFS export with\n no_root_squash,
or ensure that the permissions for all of the files that you\n want DataSync allow read access for all users. Doing either enables the agent to read the\n files. For the agent to access directories, you must additionally enable all execute\n access.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
\nFor information about NFS export configuration, see 18.7. The /etc/exports\n Configuration File in the Red Hat Enterprise Linux documentation.
", + "smithy.api#documentation": "The subdirectory in the NFS file system that is used to read data from the NFS source\n location or write data to the NFS destination. The NFS path should be a path that's\n exported by the NFS server, or a subdirectory of that path. The path should be such that it\n can be mounted by other NFS clients in your network.
\nTo see all the paths exported by your NFS server, run \"showmount -e\n nfs-server-name
\" from an NFS client that has access to your server. You can specify\n any directory that appears in the results, and any subdirectory of that directory. Ensure that\n the NFS export is accessible without Kerberos authentication.
To transfer all the data in the folder you specified, DataSync needs to have\n permissions to read all the data. To ensure this, either configure the NFS export with\n no_root_squash,
or ensure that the permissions for all of the files that you\n want DataSync allow read access for all users. Doing either enables the agent to read the\n files. For the agent to access directories, you must additionally enable all execute\n access.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
\n \nFor information about NFS export configuration, see 18.7. The /etc/exports\n Configuration File in the Red Hat Enterprise Linux documentation.
", "smithy.api#required": {} } }, @@ -934,7 +934,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates an endpoint for an Amazon S3 bucket that DataSync can access for a transfer.
\nFor\n more information, see\n Create an Amazon S3 location\n in the DataSync User Guide.
" + "smithy.api#documentation": "Creates an endpoint for an Amazon S3 bucket that DataSync can access for a transfer.
\n\n \nFor\n more information, see\n Create an Amazon S3 location\n in the DataSync User Guide.
" } }, "com.amazonaws.datasync#CreateLocationS3Request": { @@ -956,7 +956,7 @@ "S3StorageClass": { "target": "com.amazonaws.datasync#S3StorageClass", "traits": { - "smithy.api#documentation": "The Amazon S3 storage class that you want to store your files in when this location is\n used as a task destination. For buckets in Amazon Web Services Regions, the storage class defaults to Standard.\n For buckets on Outposts, the storage class defaults to Amazon Web Services S3 Outposts.
\nFor more information about S3 storage classes, see Amazon S3 Storage Classes. Some storage classes have behaviors that\n can affect your S3 storage cost. For detailed information, see Considerations when working with S3 storage classes in DataSync.
" + "smithy.api#documentation": "The Amazon S3 storage class that you want to store your files in when this location is\n used as a task destination. For buckets in Amazon Web Services Regions, the storage class defaults to Standard.\n For buckets on Outposts, the storage class defaults to Amazon Web Services S3 Outposts.
\n \nFor more information about S3 storage classes, see Amazon S3 Storage Classes. Some storage classes have behaviors that\n can affect your S3 storage cost. For detailed information, see Considerations when working with S3 storage classes in DataSync.
" } }, "S3Config": { @@ -1022,7 +1022,7 @@ "Subdirectory": { "target": "com.amazonaws.datasync#SmbSubdirectory", "traits": { - "smithy.api#documentation": "The subdirectory in the SMB file system that is used to read data from the SMB source\n location or write data to the SMB destination. The SMB path should be a path that's\n exported by the SMB server, or a subdirectory of that path. The path should be such that it\n can be mounted by other SMB clients in your network.
\n\n Subdirectory
must be specified with forward slashes. For example,\n /path/to/folder
.
To transfer all the data in the folder you specified, DataSync needs to have permissions\n to mount the SMB share, as well as to access all the data in that share. To ensure this,\n either ensure that the user/password specified belongs to the user who can mount the share,\n and who has the appropriate permissions for all of the files and directories that you want\n DataSync to access, or use credentials of a member of the Backup Operators group to mount\n the share. Doing either enables the agent to access the data. For the agent to access\n directories, you must additionally enable all execute access.
", + "smithy.api#documentation": "The subdirectory in the SMB file system that is used to read data from the SMB source\n location or write data to the SMB destination. The SMB path should be a path that's\n exported by the SMB server, or a subdirectory of that path. The path should be such that it\n can be mounted by other SMB clients in your network.
\n\n Subdirectory
must be specified with forward slashes. For example,\n /path/to/folder
.
To transfer all the data in the folder you specified, DataSync needs to have permissions\n to mount the SMB share, as well as to access all the data in that share. To ensure this,\n either ensure that the user/password specified belongs to the user who can mount the share,\n and who has the appropriate permissions for all of the files and directories that you want\n DataSync to access, or use credentials of a member of the Backup Operators group to mount\n the share. Doing either enables the agent to access the data. For the agent to access\n directories, you must additionally enable all execute access.
", "smithy.api#required": {} } }, @@ -1036,7 +1036,7 @@ "User": { "target": "com.amazonaws.datasync#SmbUser", "traits": { - "smithy.api#documentation": "The user who can mount the share, has the permissions to access files and folders in the\n SMB share.
\nFor information about choosing a user name that ensures sufficient permissions to files,\n folders, and metadata, see the User setting for SMB locations.
", + "smithy.api#documentation": "The user who can mount the share, has the permissions to access files and folders in the\n SMB share.
\n \nFor information about choosing a user name that ensures sufficient permissions to files,\n folders, and metadata, see the User setting for SMB locations.
", "smithy.api#required": {} } }, @@ -1108,7 +1108,7 @@ } ], "traits": { - "smithy.api#documentation": "Configures a task, which defines where and how DataSync transfers your\n data.
\nA task includes a source location, a destination location, and the preferences for how and\n when you want to transfer your data (such as bandwidth limits, scheduling, among other\n options).
" + "smithy.api#documentation": "Configures a task, which defines where and how DataSync transfers your\n data.
\nA task includes a source location, a destination location, and the preferences for how and\n when you want to transfer your data (such as bandwidth limits, scheduling, among other\n options).
\nWhen you create a task that transfers data between Amazon Web Services services in\n different Amazon Web Services Regions, one of your locations must reside in the Region where\n you're using DataSync.
\nFor more information, see the following topics:
\nSpecifies the configuration options for a task. Some options include preserving file or object metadata and verifying data integrity.
\nYou can also override these options before starting an individual run of a task (also\n known as a task execution). For more information, see StartTaskExecution.
" + "smithy.api#documentation": "The set of configuration options that control the behavior of a single execution of the\n task that occurs when you call StartTaskExecution
. You can configure these\n options to preserve metadata such as user ID (UID) and group ID (GID), file permissions, data\n integrity verification, and so on.
For each individual task execution, you can override these options by specifying the\n OverrideOptions
before starting the task execution. For more information, see\n the StartTaskExecution operation.
Specifies a list of filter rules that exclude specific data during your transfer. For more\n information and examples, see Filtering data transferred by DataSync.
" + "smithy.api#documentation": "A list of filter rules that determines which files to exclude from a task. The list should\n contain a single filter string that consists of the patterns to exclude. The patterns are\n delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\"
.
\n
" } }, "Schedule": { @@ -1161,13 +1161,13 @@ "Tags": { "target": "com.amazonaws.datasync#InputTagList", "traits": { - "smithy.api#documentation": "Specifies the tags that you want to apply to the Amazon Resource Name (ARN)\n representing the task.
\n\n Tags are key-value pairs that help you manage, filter, and search\n for your DataSync resources.
" + "smithy.api#documentation": "The key-value pair that represents the tag that you want to add to the resource. The\n value can be an empty string.
" } }, "Includes": { "target": "com.amazonaws.datasync#FilterList", "traits": { - "smithy.api#documentation": "Specifies a list of filter rules that include specific data during your transfer. For more information and examples, see Filtering data transferred by DataSync.
" + "smithy.api#documentation": "A list of filter rules that determines which files to include when running a task. The\n pattern contains a single filter string that consists of the patterns to include. The patterns\n are delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\"
.
Deletes an DataSync task.
" + "smithy.api#documentation": "Deletes a task.
" } }, "com.amazonaws.datasync#DeleteTaskRequest": { @@ -1293,7 +1293,7 @@ "TaskArn": { "target": "com.amazonaws.datasync#TaskArn", "traits": { - "smithy.api#documentation": "Specifies the Amazon Resource Name (ARN) of the task that you want to delete.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the task to delete.
", "smithy.api#required": {} } } @@ -2248,7 +2248,7 @@ "Status": { "target": "com.amazonaws.datasync#TaskExecutionStatus", "traits": { - "smithy.api#documentation": "The status of the task execution.
\nFor detailed information about task execution statuses, see Understanding\n Task Statuses in the DataSync User Guide.\n
" + "smithy.api#documentation": "The status of the task execution.
\n\n \nFor detailed information about task execution statuses, see Understanding\n Task Statuses in the DataSync User Guide.\n
" } }, "Options": { @@ -2257,13 +2257,13 @@ "Excludes": { "target": "com.amazonaws.datasync#FilterList", "traits": { - "smithy.api#documentation": "A list of filter rules that exclude specific data during your transfer. For more information and examples, see Filtering data transferred by DataSync.
" + "smithy.api#documentation": "A list of filter rules that determines which files to exclude from a task. The list should\n contain a single filter string that consists of the patterns to exclude. The patterns are\n delimited by \"|\" (that is, a pipe), for example: \"/folder1|/folder2\"
\n
A list of filter rules that include specific data during your transfer. For more information and examples, see Filtering data transferred by DataSync.
" + "smithy.api#documentation": "A list of filter rules that determines which files to include when running a task. The\n list should contain a single filter string that consists of the patterns to include. The\n patterns are delimited by \"|\" (that is, a pipe), for example: \"/folder1|/folder2\"
\n
The expected number of files that is to be transferred over the network. This value is\n calculated during the PREPARING
phase before the TRANSFERRING
phase\n of the task execution. This value is the expected number of files to be transferred. It's\n calculated based on comparing the content of the source and destination locations and finding\n the delta that needs to be transferred.
The expected number of files that is to be transferred over the network. This value is\n calculated during the PREPARING phase, before the TRANSFERRING phase. This value is the\n expected number of files to be transferred. It's calculated based on comparing the\n content of the source and destination locations and finding the delta that needs to be\n transferred.
" } }, "EstimatedBytesToTransfer": { @@ -2290,7 +2290,7 @@ "target": "com.amazonaws.datasync#long", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "The actual number of files that was transferred over the network. This value is\n calculated and updated on an ongoing basis during the TRANSFERRING
phase of the\n task execution. It's updated periodically when each file is read from the source and sent over\n the network.
If failures occur during a transfer, this value can be less than\n EstimatedFilesToTransfer
. In some cases, this value can also be greater than\n EstimatedFilesToTransfer
. This element is implementation-specific for some\n location types, so don't use it as an indicator for a correct file number or to monitor your\n task execution.
The actual number of files that was transferred over the network. This value is\n calculated and updated on an ongoing basis during the TRANSFERRING phase. It's updated\n periodically when each file is read from the source and sent over the network.
\nIf failures occur during a transfer, this value can be less than\n EstimatedFilesToTransfer
. This value can also be greater than\n EstimatedFilesTransferred
in some cases. This element is\n implementation-specific for some location types, so don't use it as an indicator for a correct\n file number or to monitor your task execution.
The total number of bytes that are involved in the transfer. For the number of bytes\n sent over the network, see BytesCompressed
.
The physical number of bytes transferred over the network.
" } }, "Result": { @@ -2317,7 +2317,7 @@ "target": "com.amazonaws.datasync#long", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "The physical number of bytes transferred over the network after compression was applied.\n In most cases, this number is less than BytesTransferred
unless the data isn't\n compressible.
The physical number of bytes transferred over the network after compression was applied.\n In most cases, this number is less than BytesTransferred
.
The status of the task that was described.
\nFor detailed information about task execution statuses, see Understanding\n Task Statuses in the DataSync User Guide.
" + "smithy.api#documentation": "The status of the task that was described.
\n\n \nFor detailed information about task execution statuses, see Understanding\n Task Statuses in the DataSync User Guide.
" } }, "Name": { @@ -2364,7 +2364,7 @@ "CurrentTaskExecutionArn": { "target": "com.amazonaws.datasync#TaskExecutionArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the task execution that is transferring files.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the task execution that is syncing files.
" } }, "SourceLocationArn": { @@ -2382,31 +2382,31 @@ "CloudWatchLogGroupArn": { "target": "com.amazonaws.datasync#LogGroupArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that was used to\n monitor and log events in the task.
\nFor more information on these groups, see Working with Log Groups and Log\n Streams in the Amazon CloudWatch User Guide.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Amazon CloudWatch log group that was used to\n monitor and log events in the task.
\n\n \nFor more information on these groups, see Working with Log Groups and Log\n Streams in the Amazon CloudWatch User Guide.
" } }, "SourceNetworkInterfaceArns": { "target": "com.amazonaws.datasync#SourceNetworkInterfaceArns", "traits": { - "smithy.api#documentation": "The Amazon Resource Names (ARNs) of the network interfaces created for your source location. For more information, see Network interface requirements.
" + "smithy.api#documentation": "The Amazon Resource Names (ARNs) of the source elastic network interfaces (ENIs) that were\n created for your subnet.
" } }, "DestinationNetworkInterfaceArns": { "target": "com.amazonaws.datasync#DestinationNetworkInterfaceArns", "traits": { - "smithy.api#documentation": "The Amazon Resource Names (ARNs) of the network interfaces created for your destination location. For more information, see Network interface requirements.
" + "smithy.api#documentation": "The Amazon Resource Names (ARNs) of the destination elastic network interfaces (ENIs) that\n were created for your subnet.
" } }, "Options": { "target": "com.amazonaws.datasync#Options", "traits": { - "smithy.api#documentation": "The configuration options that control the behavior of the StartTaskExecution
operation. Some options include preserving file or object metadata and verifying data integrity.
You can override these options for each task execution. For more information, see StartTaskExecution.
" + "smithy.api#documentation": "The set of configuration options that control the behavior of a single execution of the\n task that occurs when you call StartTaskExecution
. You can configure these\n options to preserve metadata such as user ID (UID) and group (GID), file permissions, data\n integrity verification, and so on.
For each individual task execution, you can override these options by specifying the\n overriding OverrideOptions
value to StartTaskExecution operation.
A list of filter rules that exclude specific data during your transfer. For more information and examples, see Filtering data transferred by DataSync.
" + "smithy.api#documentation": "A list of filter rules that determines which files to exclude from a task. The list should\n contain a single filter string that consists of the patterns to exclude. The patterns are\n delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\"
.
\n
" } }, "Schedule": { @@ -2436,7 +2436,7 @@ "Includes": { "target": "com.amazonaws.datasync#FilterList", "traits": { - "smithy.api#documentation": "A list of filter rules that include specific data during your transfer. For more information and examples, see Filtering data transferred by DataSync.
" + "smithy.api#documentation": "A list of filter rules that determines which files to include when running a task. The\n pattern contains a single filter string that consists of the patterns to include. The patterns\n are delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2
\".
DataSync is a managed data transfer service that makes it simpler for you\n to automate moving data between on-premises storage and Amazon Web Services storage services. You also can use DataSync to transfer data between other cloud providers and Amazon Web Services storage services.
\nThis API interface reference includes documentation for using DataSync\n programmatically. For complete information, see the \n DataSync User\n Guide\n .
", + "smithy.api#documentation": "DataSync is a managed data transfer service that makes it simpler for you\n to automate moving data between on-premises storage and Amazon Web Services storage services. You also can use DataSync to transfer data between other cloud providers and Amazon Web Services storage services.
\nThis API interface reference includes documentation for using DataSync\n programmatically. For complete information, see the \n DataSync User\n Guide\n .
", "smithy.api#title": "AWS DataSync", "smithy.rules#endpointRuleSet": { "version": "1.0", "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -2873,6 +2873,15 @@ "ref": "Endpoint" } ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" } ], "type": "tree", @@ -3136,8 +3145,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-south-1" + "Region": "ap-south-1", + "UseDualStack": true } }, { @@ -3149,8 +3158,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-south-1" + "Region": "ap-south-1", + "UseDualStack": false } }, { @@ -3162,8 +3171,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-south-1" + "Region": "ap-south-1", + "UseDualStack": true } }, { @@ -3175,8 +3184,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-south-1" + "Region": "ap-south-1", + "UseDualStack": false } }, { @@ -3188,8 +3197,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-south-1" + "Region": "eu-south-1", + "UseDualStack": true } }, { @@ -3201,8 +3210,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-south-1" + "Region": "eu-south-1", + "UseDualStack": false } }, { @@ -3214,8 +3223,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-south-1" + "Region": "eu-south-1", + "UseDualStack": true } }, { @@ -3227,8 +3236,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-south-1" + "Region": "eu-south-1", + "UseDualStack": false } }, { @@ -3240,8 +3249,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseDualStack": true } }, { @@ -3253,8 +3262,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseDualStack": false } }, { @@ -3266,8 +3275,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseDualStack": true } }, { @@ -3279,8 +3288,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-east-1" + "Region": "us-gov-east-1", + "UseDualStack": false } }, { @@ -3292,8 +3301,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "me-central-1" + "Region": "me-central-1", + "UseDualStack": true } }, { @@ -3305,8 +3314,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "me-central-1" + "Region": "me-central-1", + "UseDualStack": false } }, { @@ -3318,8 +3327,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "me-central-1" + "Region": "me-central-1", + "UseDualStack": true } }, { @@ -3331,8 +3340,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "me-central-1" + "Region": "me-central-1", + "UseDualStack": false } }, { @@ -3344,8 +3353,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "ca-central-1" + "Region": "ca-central-1", + "UseDualStack": true } }, { @@ -3357,8 +3366,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "ca-central-1" + "Region": "ca-central-1", + "UseDualStack": false } }, { @@ -3370,8 +3379,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "ca-central-1" + "Region": "ca-central-1", + "UseDualStack": true } }, { @@ -3383,8 +3392,8 @@ }, 
"params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "ca-central-1" + "Region": "ca-central-1", + "UseDualStack": false } }, { @@ -3396,8 +3405,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-central-1" + "Region": "eu-central-1", + "UseDualStack": true } }, { @@ -3409,8 +3418,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-central-1" + "Region": "eu-central-1", + "UseDualStack": false } }, { @@ -3422,8 +3431,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-central-1" + "Region": "eu-central-1", + "UseDualStack": true } }, { @@ -3435,56 +3444,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "us-iso-west-1" - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://datasync-fips.us-iso-west-1.c2s.ic.gov" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "us-iso-west-1" - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "error": "DualStack is enabled but this partition does not support DualStack" - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "us-iso-west-1" - } - }, - { - "documentation": "For region us-iso-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://datasync.us-iso-west-1.c2s.ic.gov" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-west-1" + "Region": "eu-central-1", + "UseDualStack": false } }, { @@ -3496,8 +3457,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "us-west-1" + "Region": "us-west-1", + "UseDualStack": true } }, { @@ -3509,8 +3470,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "us-west-1" + "Region": "us-west-1", + "UseDualStack": false } }, { @@ -3522,8 +3483,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "us-west-1" + "Region": "us-west-1", + "UseDualStack": true } }, { @@ -3535,8 +3496,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "us-west-1" + "Region": "us-west-1", + "UseDualStack": false } }, { @@ -3548,8 +3509,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "us-west-2" + "Region": "us-west-2", + "UseDualStack": true } }, { @@ -3561,8 +3522,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "us-west-2" + "Region": "us-west-2", + "UseDualStack": false } }, { @@ -3574,8 +3535,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "us-west-2" + "Region": "us-west-2", + "UseDualStack": true } }, { @@ -3587,8 +3548,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "us-west-2" + "Region": "us-west-2", + "UseDualStack": false } }, { @@ -3600,8 +3561,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "af-south-1" + "Region": "af-south-1", + "UseDualStack": true } }, { @@ -3613,8 +3574,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "af-south-1" + "Region": "af-south-1", + "UseDualStack": false } }, { @@ -3626,8 +3587,8 
@@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "af-south-1" + "Region": "af-south-1", + "UseDualStack": true } }, { @@ -3639,8 +3600,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "af-south-1" + "Region": "af-south-1", + "UseDualStack": false } }, { @@ -3652,8 +3613,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-north-1" + "Region": "eu-north-1", + "UseDualStack": true } }, { @@ -3665,8 +3626,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-north-1" + "Region": "eu-north-1", + "UseDualStack": false } }, { @@ -3678,8 +3639,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-north-1" + "Region": "eu-north-1", + "UseDualStack": true } }, { @@ -3691,8 +3652,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-north-1" + "Region": "eu-north-1", + "UseDualStack": false } }, { @@ -3704,8 +3665,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-west-3" + "Region": "eu-west-3", + "UseDualStack": true } }, { @@ -3717,8 +3678,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-west-3" + "Region": "eu-west-3", + "UseDualStack": false } }, { @@ -3730,8 +3691,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-west-3" + "Region": "eu-west-3", + "UseDualStack": true } }, { @@ -3743,8 +3704,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-3" + "Region": "eu-west-3", + "UseDualStack": false } }, { @@ -3756,8 +3717,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-west-2" + "Region": "eu-west-2", + "UseDualStack": true } }, { @@ -3769,8 +3730,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-west-2" + "Region": "eu-west-2", + "UseDualStack": false } }, { @@ -3782,8 +3743,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-west-2" + "Region": "eu-west-2", + "UseDualStack": true } }, { @@ -3795,8 +3756,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-2" + "Region": "eu-west-2", + "UseDualStack": false } }, { @@ -3808,8 +3769,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-west-1" + "Region": "eu-west-1", + "UseDualStack": true } }, { @@ -3821,8 +3782,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-west-1" + "Region": "eu-west-1", + "UseDualStack": false } }, { @@ -3834,8 +3795,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-west-1" + "Region": "eu-west-1", + "UseDualStack": true } }, { @@ -3847,8 +3808,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-1" + "Region": "eu-west-1", + "UseDualStack": false } }, { @@ -3860,8 +3821,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-northeast-3" + "Region": "ap-northeast-3", + "UseDualStack": true } }, { @@ -3873,8 +3834,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-northeast-3" + "Region": "ap-northeast-3", + "UseDualStack": false } }, { @@ -3886,8 +3847,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-northeast-3" + "Region": "ap-northeast-3", + "UseDualStack": true } }, { @@ -3899,8 +3860,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-3" + "Region": "ap-northeast-3", + "UseDualStack": false } }, { @@ -3912,8 +3873,8 @@ }, 
"params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-northeast-2" + "Region": "ap-northeast-2", + "UseDualStack": true } }, { @@ -3925,8 +3886,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-northeast-2" + "Region": "ap-northeast-2", + "UseDualStack": false } }, { @@ -3938,8 +3899,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-northeast-2" + "Region": "ap-northeast-2", + "UseDualStack": true } }, { @@ -3951,8 +3912,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-2" + "Region": "ap-northeast-2", + "UseDualStack": false } }, { @@ -3964,8 +3925,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-northeast-1" + "Region": "ap-northeast-1", + "UseDualStack": true } }, { @@ -3977,8 +3938,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-northeast-1" + "Region": "ap-northeast-1", + "UseDualStack": false } }, { @@ -3990,8 +3951,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-northeast-1" + "Region": "ap-northeast-1", + "UseDualStack": true } }, { @@ -4003,8 +3964,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-northeast-1" + "Region": "ap-northeast-1", + "UseDualStack": false } }, { @@ -4016,8 +3977,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "me-south-1" + "Region": "me-south-1", + "UseDualStack": true } }, { @@ -4029,8 +3990,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "me-south-1" + "Region": "me-south-1", + "UseDualStack": false } }, { @@ -4042,8 +4003,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "me-south-1" + "Region": "me-south-1", + "UseDualStack": true } }, { @@ -4055,8 +4016,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "me-south-1" + "Region": "me-south-1", + "UseDualStack": false } }, { @@ -4068,8 +4029,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "sa-east-1" + "Region": "sa-east-1", + "UseDualStack": true } }, { @@ -4081,8 +4042,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "sa-east-1" + "Region": "sa-east-1", + "UseDualStack": false } }, { @@ -4094,8 +4055,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "sa-east-1" + "Region": "sa-east-1", + "UseDualStack": true } }, { @@ -4107,8 +4068,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "sa-east-1" + "Region": "sa-east-1", + "UseDualStack": false } }, { @@ -4120,8 +4081,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-east-1" + "Region": "ap-east-1", + "UseDualStack": true } }, { @@ -4133,8 +4094,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-east-1" + "Region": "ap-east-1", + "UseDualStack": false } }, { @@ -4146,8 +4107,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-east-1" + "Region": "ap-east-1", + "UseDualStack": true } }, { @@ -4159,8 +4120,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-east-1" + "Region": "ap-east-1", + "UseDualStack": false } }, { @@ -4172,8 +4133,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseDualStack": true } }, { @@ -4185,8 +4146,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseDualStack": false } }, { 
@@ -4198,8 +4159,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseDualStack": true } }, { @@ -4211,8 +4172,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-north-1" + "Region": "cn-north-1", + "UseDualStack": false } }, { @@ -4224,8 +4185,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "us-gov-west-1" + "Region": "us-gov-west-1", + "UseDualStack": true } }, { @@ -4237,8 +4198,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "us-gov-west-1" + "Region": "us-gov-west-1", + "UseDualStack": false } }, { @@ -4250,8 +4211,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "us-gov-west-1" + "Region": "us-gov-west-1", + "UseDualStack": true } }, { @@ -4263,8 +4224,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "us-gov-west-1" + "Region": "us-gov-west-1", + "UseDualStack": false } }, { @@ -4276,8 +4237,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-southeast-1" + "Region": "ap-southeast-1", + "UseDualStack": true } }, { @@ -4289,8 +4250,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-southeast-1" + "Region": "ap-southeast-1", + "UseDualStack": false } }, { @@ -4302,8 +4263,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-southeast-1" + "Region": "ap-southeast-1", + "UseDualStack": true } }, { @@ -4315,8 +4276,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-1" + "Region": "ap-southeast-1", + "UseDualStack": false } }, { @@ -4328,8 +4289,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-southeast-2" + "Region": "ap-southeast-2", + "UseDualStack": true } }, { @@ -4341,8 +4302,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-southeast-2" + "Region": "ap-southeast-2", + "UseDualStack": false } }, { @@ -4354,8 +4315,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-southeast-2" + "Region": "ap-southeast-2", + "UseDualStack": true } }, { @@ -4367,8 +4328,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-2" + "Region": "ap-southeast-2", + "UseDualStack": false } }, { @@ -4378,8 +4339,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "us-iso-east-1" + "Region": "us-iso-east-1", + "UseDualStack": true } }, { @@ -4391,8 +4352,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "us-iso-east-1" + "Region": "us-iso-east-1", + "UseDualStack": false } }, { @@ -4402,8 +4363,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "us-iso-east-1" + "Region": "us-iso-east-1", + "UseDualStack": true } }, { @@ -4415,8 +4376,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "us-iso-east-1" + "Region": "us-iso-east-1", + "UseDualStack": false } }, { @@ -4428,8 +4389,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-southeast-3" + "Region": "ap-southeast-3", + "UseDualStack": true } }, { @@ -4441,8 +4402,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-southeast-3" + "Region": "ap-southeast-3", + "UseDualStack": false } }, { @@ -4454,8 +4415,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-southeast-3" + "Region": "ap-southeast-3", + "UseDualStack": true } }, { @@ -4467,8 +4428,8 @@ }, "params": { "UseFIPS": 
false, - "UseDualStack": false, - "Region": "ap-southeast-3" + "Region": "ap-southeast-3", + "UseDualStack": false } }, { @@ -4480,8 +4441,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-1" + "Region": "us-east-1", + "UseDualStack": true } }, { @@ -4493,8 +4454,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-1" + "Region": "us-east-1", + "UseDualStack": false } }, { @@ -4506,8 +4467,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-1" + "Region": "us-east-1", + "UseDualStack": true } }, { @@ -4519,8 +4480,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-1" + "Region": "us-east-1", + "UseDualStack": false } }, { @@ -4532,8 +4493,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "us-east-2" + "Region": "us-east-2", + "UseDualStack": true } }, { @@ -4545,8 +4506,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "us-east-2" + "Region": "us-east-2", + "UseDualStack": false } }, { @@ -4558,8 +4519,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "us-east-2" + "Region": "us-east-2", + "UseDualStack": true } }, { @@ -4571,8 +4532,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "us-east-2" + "Region": "us-east-2", + "UseDualStack": false } }, { @@ -4584,8 +4545,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "cn-northwest-1" + "Region": "cn-northwest-1", + "UseDualStack": true } }, { @@ -4597,8 +4558,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "cn-northwest-1" + "Region": "cn-northwest-1", + "UseDualStack": false } }, { @@ -4610,8 +4571,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "cn-northwest-1" + "Region": "cn-northwest-1", + "UseDualStack": true } }, { @@ -4623,8 +4584,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "cn-northwest-1" + "Region": "cn-northwest-1", + "UseDualStack": false } }, { @@ -4634,8 +4595,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": true, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseDualStack": true } }, { @@ -4647,8 +4608,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseDualStack": false } }, { @@ -4658,8 +4619,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseDualStack": true } }, { @@ -4671,8 +4632,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, - "Region": "us-isob-east-1" + "Region": "us-isob-east-1", + "UseDualStack": false } }, { @@ -4684,8 +4645,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": false, "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -4696,8 +4657,8 @@ }, "params": { "UseFIPS": true, - "UseDualStack": false, "Region": "us-east-1", + "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -4708,8 +4669,8 @@ }, "params": { "UseFIPS": false, - "UseDualStack": true, "Region": "us-east-1", + "UseDualStack": true, "Endpoint": "https://example.com" } } @@ -5272,7 +5233,7 @@ } ], "traits": { - "smithy.api#documentation": "Returns all the tags associated with an Amazon Web Services resource.
", + "smithy.api#documentation": "Returns all the tags associated with a specified resource.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -5287,20 +5248,20 @@ "ResourceArn": { "target": "com.amazonaws.datasync#TaggableResourceArn", "traits": { - "smithy.api#documentation": "Specifies the Amazon Resource Name (ARN) of the resource that you want tag information on.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource whose tags to list.
", "smithy.api#required": {} } }, "MaxResults": { "target": "com.amazonaws.datasync#MaxResults", "traits": { - "smithy.api#documentation": "Specifies how many results that you want in the response.
" + "smithy.api#documentation": "The maximum number of locations to return.
" } }, "NextToken": { "target": "com.amazonaws.datasync#NextToken", "traits": { - "smithy.api#documentation": "Specifies an opaque string that indicates the position to begin the next list of results in the response.
" + "smithy.api#documentation": "An opaque string that indicates the position at which to begin the next list of\n locations.
" } } }, @@ -5314,13 +5275,13 @@ "Tags": { "target": "com.amazonaws.datasync#OutputTagList", "traits": { - "smithy.api#documentation": "An array of tags applied to the specified resource.
" + "smithy.api#documentation": "Array of resource tags.
" } }, "NextToken": { "target": "com.amazonaws.datasync#NextToken", "traits": { - "smithy.api#documentation": "The opaque string that indicates the position to begin the next list of results in the response.
" + "smithy.api#documentation": "An opaque string that indicates the position at which to begin returning the next list\n of resource tags.
" } } }, @@ -5661,7 +5622,7 @@ "Version": { "target": "com.amazonaws.datasync#NfsVersion", "traits": { - "smithy.api#documentation": "Specifies the NFS version that you want DataSync to use when mounting your NFS share. If the server refuses to use the version specified, the task fails.
\nYou can specify the following options:
\n\n AUTOMATIC
(default): DataSync chooses NFS version 4.1.
\n NFS3
: Stateless protocol version that allows for asynchronous\n writes on the server.
\n NFSv4_0
: Stateful, firewall-friendly protocol version that supports\n delegations and pseudo file systems.
\n NFSv4_1
: Stateful protocol version that supports sessions,\n directory delegations, and parallel data processing. NFS version 4.1 also includes all\n features available in version 4.0.
DataSync currently only supports NFS version 3 with Amazon FSx for NetApp ONTAP locations.
\nSpecifies the NFS version that you want DataSync to use when mounting your NFS share. If the server refuses to use the version specified, the task fails.
\n\nYou can specify the following options:
\n\n AUTOMATIC
(default): DataSync chooses NFS version 4.1.
\n NFS3
: Stateless protocol version that allows for asynchronous\n writes on the server.
\n NFSv4_0
: Stateful, firewall-friendly protocol version that supports\n delegations and pseudo file systems.
\n NFSv4_1
: Stateful protocol version that supports sessions,\n directory delegations, and parallel data processing. NFS version 4.1 also includes all\n features available in version 4.0.
DataSync currently only supports NFS version 3 with Amazon FSx for NetApp ONTAP locations.
\nSpecifies how and when DataSync checks the integrity of your data during a\n transfer.
\nDefault value: POINT_IN_TIME_CONSISTENT
\n
\n ONLY_FILES_TRANSFERRED
(recommended): DataSync calculates the checksum of\n transferred files and metadata at the source location. At the end of the transfer, DataSync then\n compares this checksum to the checksum calculated on those files at the destination.
We recommend this option when transferring to S3 Glacier Flexible Retrieval or\n S3 Glacier Deep Archive storage classes. For more information, see\n Storage class\n considerations with Amazon S3 locations.
\n\n POINT_IN_TIME_CONSISTENT
: At the end of the transfer, DataSync\n scans the entire source and destination to verify that both locations are fully\n synchronized.
You can't use this option when transferring to S3 Glacier Flexible Retrieval or\n S3 Glacier Deep Archive storage classes. For more information, see\n Storage class\n considerations with Amazon S3 locations.
\n\n NONE
: DataSync doesn't run additional verification at the end of\n the transfer. All data transmissions are still integrity-checked with checksum verification\n during the transfer.
A value that determines whether a data integrity verification should be performed at\n the end of a task execution after all data and metadata have been transferred. \n For more information, see \n Configure task settings.\n
\nDefault value: POINT_IN_TIME_CONSISTENT
\n
\n ONLY_FILES_TRANSFERRED
(recommended): Perform verification only on files\n that were transferred.
\n POINT_IN_TIME_CONSISTENT
: Scan the entire source and entire destination at\n the end of the transfer to verify that source and destination are fully synchronized. This\n option isn't supported when transferring to S3 Glacier Flexible Retrieval or S3 Glacier Deep Archive storage classes.
\n NONE
: No additional verification is done at the end of the transfer, but\n all data transmissions are integrity-checked with checksum verification during the\n transfer.
Specifies whether data at the destination location should be overwritten or preserved. If\n set to NEVER
, a destination file for example will not be replaced by a source\n file (even if the destination file differs from the source file). If you modify files in the\n destination and you sync the files, you can use this value to protect against overwriting\n those changes.
Some storage classes have specific behaviors that can affect your Amazon S3\n storage cost. For detailed information, see Considerations\n when working with Amazon S3 storage classes in DataSync .
" + "smithy.api#documentation": "A value that determines whether files at the destination should be overwritten or\n preserved when copying files. If set to NEVER
a destination file will not be\n replaced by a source file, even if the destination file differs from the source file. If you modify files in the destination and you sync the files, you can use this value to\n protect against overwriting those changes.
Some storage classes have specific behaviors that can affect your S3 storage cost. For detailed information, see \n Considerations when working with Amazon S3 storage classes in DataSync \n in the DataSync\n User Guide.
" } }, "Atime": { "target": "com.amazonaws.datasync#Atime", "traits": { - "smithy.api#documentation": "Specifies whether to preserve metadata indicating the last time a file was read or\n written to. If you set Atime
to BEST_EFFORT
, DataSync\n attempts to preserve the original Atime
attribute on all source files (that is,\n the version before the PREPARING
phase of the task execution).
The behavior of Atime
isn't fully standard across platforms, so DataSync can only do this on a best-effort basis.
Default value: BEST_EFFORT
\n
\n BEST_EFFORT
: Attempt to preserve the per-file Atime
value\n (recommended).
\n NONE
: Ignore Atime
.
If Atime
is set to BEST_EFFORT
, Mtime
must be set\n to PRESERVE
.
If Atime
is set to NONE
, Mtime
must also be\n NONE
.
A file metadata value that shows the last time a file was accessed (that is, when the\n file was read or written to). If you set Atime
to BEST_EFFORT
,\n DataSync attempts to preserve the original Atime
attribute on all\n source files (that is, the version before the PREPARING
phase). However,\n Atime
's behavior is not fully standard across platforms, so DataSync can only do this on a best-effort basis.
Default value: BEST_EFFORT
\n
\n BEST_EFFORT
: Attempt to preserve the per-file Atime
value\n (recommended).
\n NONE
: Ignore Atime
.
If Atime
is set to BEST_EFFORT
, Mtime
must be set\n to PRESERVE
.
If Atime
is set to NONE
, Mtime
must also be\n NONE
.
Specifies whether to preserve metadata indicating the last time that a file was written\n to before the PREPARING
phase of your task execution. This option is required\n when you need to run the a task more than once.
Default Value: PRESERVE
\n
\n PRESERVE
: Preserve original Mtime
(recommended)
\n NONE
: Ignore Mtime
.
If Mtime
is set to PRESERVE
, Atime
must be set to\n BEST_EFFORT
.
If Mtime
is set to NONE
, Atime
must also be set\n to NONE
.
A value that indicates the last time that a file was modified (that is, a file was\n written to) before the PREPARING
phase. This option is required for cases when\n you need to run the same task more than one time.
Default Value: PRESERVE
\n
\n PRESERVE
: Preserve original Mtime
(recommended)
\n NONE
: Ignore Mtime
.
If Mtime
is set to PRESERVE
, Atime
must be set to\n BEST_EFFORT
.
If Mtime
is set to NONE
, Atime
must also be set\n to NONE
.
Specifies the POSIX user ID (UID) of the file's owner.
\nFor more information, see Metadata copied by DataSync.
\nDefault value: INT_VALUE
. This preserves the integer value of the ID.
\n INT_VALUE
: Preserve the integer value of UID and group ID (GID)\n (recommended).
\n NONE
: Ignore UID and GID.
The POSIX user ID (UID) of the file's owner.
\nFor more information, see Metadata copied by DataSync.
\nDefault value: INT_VALUE
. This preserves the integer value of the ID.
\n INT_VALUE
: Preserve the integer value of UID and group ID (GID)\n (recommended).
\n NONE
: Ignore UID and GID.
Specifies the POSIX group ID (GID) of the file's owners.
\nFor more information, see Metadata copied by DataSync.
\nDefault value: INT_VALUE
. This preserves the integer value of the ID.
\n INT_VALUE
: Preserve the integer value of user ID (UID) and GID\n (recommended).
\n NONE
: Ignore UID and GID.
The POSIX group ID (GID) of the file's owners.
\nFor more information, see Metadata copied by DataSync.
\nDefault value: INT_VALUE
. This preserves the integer value of the ID.
\n INT_VALUE
: Preserve the integer value of user ID (UID) and GID\n (recommended).
\n NONE
: Ignore UID and GID.
Specifies whether files in the destination location that don't exist in the source\n should be preserved. This option can affect your Amazon S3 storage cost. If your task\n deletes objects, you might incur minimum storage duration charges for certain storage classes.\n For detailed information, see Considerations\n when working with Amazon S3 storage classes in DataSync .
\nDefault value: PRESERVE
\n
\n PRESERVE
: Ignore such destination files (recommended).
\n REMOVE
: Delete destination files that aren’t present in the\n source.
A value that specifies whether files in the destination that don't exist in the source\n file system should be preserved. This option can affect your storage cost. \n If your task deletes objects, you might incur minimum storage duration charges for certain storage classes. For detailed\n information, see Considerations when working with Amazon S3 storage classes in DataSync in the DataSync User\n Guide.
\nDefault value: PRESERVE
\n
\n PRESERVE
: Ignore such destination files (recommended).
\n REMOVE
: Delete destination files that aren’t present in the\n source.
Specifies whether DataSync should preserve the metadata of block and\n character devices in the source location and recreate the files with that device name and\n metadata on the destination. DataSync copies only the name and metadata of such\n devices.
\nDataSync can't copy the actual contents of these devices because they're\n nonterminal and don't return an end-of-file (EOF) marker.
\nDefault value: NONE
\n
\n NONE
: Ignore special devices (recommended).
\n PRESERVE
: Preserve character and block device metadata. This option\n currently isn't supported for Amazon EFS.
A value that determines whether DataSync should preserve the metadata of block\n and character devices in the source file system, and re-create the files with that device name\n and metadata on the destination. DataSync does not copy the contents of such devices, only the\n name and metadata.
\nDataSync can't sync the actual contents of such devices, because they are\n nonterminal and don't return an end-of-file (EOF) marker.
\nDefault value: NONE
\n
\n NONE
: Ignore special devices (recommended).
\n PRESERVE
: Preserve character and block device metadata. This option isn't\n currently supported for Amazon EFS.
Specifies which users or groups can access a file for a specific purpose such as reading,\n writing, or execution of the file.
\nFor more information, see Metadata copied by DataSync.
\nDefault value: PRESERVE
\n
\n PRESERVE
: Preserve POSIX-style permissions (recommended).
\n NONE
: Ignore permissions.
DataSync can preserve extant permissions of a source location.
\nA value that determines which users or groups can access a file for a specific purpose\n such as reading, writing, or execution of the file.
\nFor more information, see Metadata copied by DataSync.
\nDefault value: PRESERVE
\n
\n PRESERVE
: Preserve POSIX-style permissions (recommended).
\n NONE
: Ignore permissions.
DataSync can preserve extant permissions of a source location.
\nLimits the bandwidth used by a DataSync task. For example, if you want\n DataSync to use a maximum of 1 MB, set this value to 1048576
\n (=1024*1024
).
A value that limits the bandwidth used by DataSync. For example, if you want\n DataSync to use a maximum of 1 MB, set this value to 1048576
\n (=1024*1024
).
Specifies whether tasks should be queued before executing the tasks. The default is\n ENABLED
, which means the tasks will be queued.
If you use the same agent to run multiple tasks, you can enable the tasks to run in\n series. For more information, see \n Queueing task executions.
" + "smithy.api#documentation": "A value that determines whether tasks should be queued before executing the tasks. If set\n to ENABLED
, the tasks will be queued. The default is ENABLED
.
If you use the same agent to run multiple tasks, you can enable the tasks to run in\n series. For more information, see \n Queueing task executions.
" } }, "LogLevel": { "target": "com.amazonaws.datasync#LogLevel", "traits": { - "smithy.api#documentation": "Specifies the type of logs that DataSync publishes to a Amazon CloudWatch Logs log\n group. To specify the log group, see CloudWatchLogGroupArn.
\nIf you set LogLevel
to OFF
, no logs are published.\n BASIC
publishes logs on errors for individual files transferred.\n TRANSFER
publishes logs for every file or object that is transferred and\n integrity checked.
A value that determines the type of logs that DataSync publishes to a log stream in the\n Amazon CloudWatch log group that you provide. For more information about providing a log group\n for DataSync, see CloudWatchLogGroupArn. If set to OFF
, no logs are published.\n BASIC
publishes logs on errors for individual files transferred, and\n TRANSFER
publishes logs for every file or object that is transferred and\n integrity checked.
Determines whether DataSync transfers only the data and metadata that differ\n between the source and the destination location or transfers all the content from the source\n (without comparing what's in the destination).
\n\n CHANGED
: DataSync copies only data or metadata that is new or\n different content from the source location to the destination location.
\n ALL
: DataSync copies all source location content to the destination\n (without comparing what's in the destination).
A value that determines whether DataSync transfers only the data and metadata that differ between the source \n and the destination location, or whether DataSync transfers all the content from the source, without comparing to \n the destination location.
\n\n CHANGED
: DataSync copies only data or metadata that is new or\n different content from the source location to the destination location.
\n ALL
: DataSync copies all source location content to the\n destination, without comparing to existing content on the destination.
Specifies which components of the SMB security descriptor are copied from source to\n destination objects.
\nThis value is only used for transfers between SMB and Amazon FSx for Windows File Server\n locations or between two FSx for Windows File Server locations. For more information, see how DataSync handles metadata.
\nDefault value: OWNER_DACL
\n
\n OWNER_DACL
: For each copied object, DataSync copies the following\n metadata:
The object owner.
\nNTFS discretionary access control lists (DACLs), which determine whether to \n grant access to an object.
\nDataSync won't copy NTFS system access control lists (SACLs) with this\n option.
\n\n OWNER_DACL_SACL
: For each copied object, DataSync copies the\n following metadata:
The object owner.
\nNTFS discretionary access control lists (DACLs), which determine whether to\n grant access to an object.
\nSACLs, which are used by administrators to log attempts to access a secured object.
\nCopying SACLs requires granting additional permissions to the Windows user that\n DataSync uses to access your SMB location. For information about choosing a user that\n ensures sufficient permissions to files, folders, and metadata, see user.
\n\n NONE
: None of the SMB security descriptor components are copied. Destination\n objects are owned by the user that was provided for accessing the destination location. DACLs\n and SACLs are set based on the destination server’s configuration.
A value that determines which components of the SMB security descriptor are copied from source\n to destination objects.\n
\nThis value is only used for transfers \n between SMB and Amazon FSx for Windows File Server locations, or between two Amazon FSx for Windows File\n Server locations. For more information about how \n DataSync handles metadata, see\n How DataSync Handles Metadata and Special Files.\n
\nDefault value: OWNER_DACL
\n
\n OWNER_DACL
: For each copied object, DataSync copies the following\n metadata:
Object owner.
\nNTFS discretionary access control lists (DACLs), which determine whether to \n grant access to an object.
\nWhen choosing this option, DataSync does NOT copy the NTFS system access control lists\n (SACLs), which are used by administrators to log attempts to access a secured object.
\n \n\n OWNER_DACL_SACL
: For each copied object, DataSync copies the following\n metadata:
Object owner.
\nNTFS discretionary access control lists (DACLs), which determine whether to\n grant access to an object.
\nNTFS system access control lists (SACLs), which are used by administrators \n to log attempts to access a secured object.
\nCopying SACLs requires granting additional permissions to the Windows user that DataSync\n uses to access your SMB location. For information about choosing a user that ensures\n sufficient permissions to files, folders, and metadata, see user.
\n \n\n NONE
: None of the SMB security descriptor components are copied. Destination\n objects are owned by the user that was provided for accessing the destination location. DACLs\n and SACLs are set based on the destination server’s configuration.
Specifies whether object tags are preserved when transferring between object storage\n systems. If you want your DataSync task to ignore object tags, specify the\n NONE
value.
Default Value: PRESERVE
\n
Specifies whether object tags are maintained when transferring between object storage systems. If you want your DataSync task to ignore object tags, specify the NONE
value.
Default Value: PRESERVE
\n
Configures your DataSync task settings. These options include how DataSync handles files, objects, and their associated metadata. You also can specify how\n DataSync verifies data integrity, set bandwidth limits for your task, among other\n options.
\nEach task setting has a default value. Unless you need to, you don't have to configure\n any of these Options
before starting your task.
Represents the options that are available to control the behavior of a \n StartTaskExecution\n operation. Behavior includes preserving metadata such as user\n ID (UID), group ID (GID), and file permissions, and also overwriting files in the destination,\n data integrity verification, and so on.
\nA task has a set of default options associated with it. If you don't specify an option\n in StartTaskExecution, \n the default value is used. You can override the\n defaults options on each task execution by specifying an overriding Options
value\n to StartTaskExecution.
The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role used to access\n an Amazon S3 bucket.
\nFor detailed information about using such a role, see Creating a Location for\n Amazon S3 in the DataSync User Guide.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) role used to access\n an Amazon S3 bucket.
\n \nFor detailed information about using such a role, see Creating a Location for\n Amazon S3 in the DataSync User Guide.
" } }, "com.amazonaws.datasync#S3StorageClass": { @@ -6217,12 +6178,6 @@ "traits": { "smithy.api#enumValue": "OUTPOSTS" } - }, - "GLACIER_INSTANT_RETRIEVAL": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "GLACIER_INSTANT_RETRIEVAL" - } } } }, @@ -6380,7 +6335,7 @@ } ], "traits": { - "smithy.api#documentation": "Starts an DataSync task. For each task, you can only run one task execution at a time.
\nThere are several phases to a task execution. For more information, see Task execution statuses.
" + "smithy.api#documentation": "Starts a specific invocation of a task. A TaskExecution
value represents\n an individual run of a task. Each task can have at most one TaskExecution
at a\n time.
\n TaskExecution
has the following transition phases: INITIALIZING |\n PREPARING | TRANSFERRING | VERIFYING | SUCCESS/FAILURE.
For detailed information, see the Task Execution section in the Components and\n Terminology topic in the DataSync User Guide.
" } }, "com.amazonaws.datasync#StartTaskExecutionRequest": { @@ -6389,7 +6344,7 @@ "TaskArn": { "target": "com.amazonaws.datasync#TaskArn", "traits": { - "smithy.api#documentation": "Specifies the Amazon Resource Name (ARN) of the task that you want to start.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the task to start.
", "smithy.api#required": {} } }, @@ -6399,19 +6354,13 @@ "Includes": { "target": "com.amazonaws.datasync#FilterList", "traits": { - "smithy.api#documentation": "Specifies a list of filter rules that determines which files to include when running a\n task. The pattern should contain a single filter string that consists of the patterns to\n include. The patterns are delimited by \"|\" (that is, a pipe), for example,\n \"/folder1|/folder2\"
.
A list of filter rules that determines which files to include when running a task. The\n pattern should contain a single filter string that consists of the patterns to include. The\n patterns are delimited by \"|\" (that is, a pipe), for example,\n \"/folder1|/folder2\"
.
\n
" } }, "Excludes": { "target": "com.amazonaws.datasync#FilterList", "traits": { - "smithy.api#documentation": "Specifies a list of filter rules that determines which files to exclude from a task. The\n list contains a single filter string that consists of the patterns to exclude. The patterns\n are delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\"
.
Specifies the tags that you want to apply to the Amazon Resource Name (ARN) representing the task execution.
\n\n Tags are key-value pairs that help you manage, filter, and search for your DataSync resources.
" + "smithy.api#documentation": "A list of filter rules that determines which files to exclude from a task. The list\n contains a single filter string that consists of the patterns to exclude. The patterns are\n delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\"
.
The ARN of the running task execution.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the specific task execution that was\n started.
" } } }, @@ -6483,7 +6432,7 @@ } }, "traits": { - "smithy.api#documentation": "A key-value pair representing a single tag that's been applied to an Amazon Web Services\n resource.
" + "smithy.api#documentation": "Represents a single entry in a list of Amazon Web Services resource tags. TagListEntry
\n returns an array that contains a list of tasks when the \n ListTagsForResource\n operation is called.
Applies a tag to an Amazon Web Services\n resource. Tags are key-value pairs that can help you manage, filter, and search for your resources.
\nThese include DataSync resources, such as locations, tasks, and task executions.
" + "smithy.api#documentation": "Applies a key-value pair to an Amazon Web Services resource.
" } }, "com.amazonaws.datasync#TagResourceRequest": { @@ -6512,14 +6461,14 @@ "ResourceArn": { "target": "com.amazonaws.datasync#TaggableResourceArn", "traits": { - "smithy.api#documentation": "Specifies the Amazon Resource Name (ARN) of the resource to apply the tag to.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource to apply the tag to.
", "smithy.api#required": {} } }, "Tags": { "target": "com.amazonaws.datasync#InputTagList", "traits": { - "smithy.api#documentation": "Specifies the tags that you want to apply to the resource.
", + "smithy.api#documentation": "The tags to apply.
", "smithy.api#required": {} } } @@ -6549,7 +6498,7 @@ "min": 0, "max": 128 }, - "smithy.api#pattern": "^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):datasync:[a-z\\-0-9]+:[0-9]{12}:(agent|task|location)/(agent|task|loc)-[a-f0-9]{17}(/execution/exec-[a-f0-9]{17})?$" + "smithy.api#pattern": "^arn:(aws|aws-cn|aws-us-gov|aws-iso|aws-iso-b):datasync:[a-z\\-0-9]+:[0-9]{12}:(agent|task|location)/(agent|task|loc)-[0-9a-z]{17}$" } }, "com.amazonaws.datasync#TaskArn": { @@ -6924,7 +6873,7 @@ } ], "traits": { - "smithy.api#documentation": "Removes tags from an Amazon Web Services resource.
" + "smithy.api#documentation": "Removes a tag from an Amazon Web Services resource.
" } }, "com.amazonaws.datasync#UntagResourceRequest": { @@ -6933,14 +6882,14 @@ "ResourceArn": { "target": "com.amazonaws.datasync#TaggableResourceArn", "traits": { - "smithy.api#documentation": "Specifies the Amazon Resource Name (ARN) of the resource to remove the tags from.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource to remove the tag from.
", "smithy.api#required": {} } }, "Keys": { "target": "com.amazonaws.datasync#TagKeyList", "traits": { - "smithy.api#documentation": "Specifies the keys in the tags that you want to remove.
", + "smithy.api#documentation": "The keys in the key-value pair in the tag to remove.
", "smithy.api#required": {} } } @@ -7139,7 +7088,7 @@ "Subdirectory": { "target": "com.amazonaws.datasync#NfsSubdirectory", "traits": { - "smithy.api#documentation": "The subdirectory in the NFS file system that is used to read data from the NFS source\n location or write data to the NFS destination. The NFS path should be a path that's\n exported by the NFS server, or a subdirectory of that path. The path should be such that it\n can be mounted by other NFS clients in your network.
\nTo see all the paths exported by your NFS server, run \"showmount -e\n nfs-server-name
\" from an NFS client that has access to your server. You can specify\n any directory that appears in the results, and any subdirectory of that directory. Ensure that\n the NFS export is accessible without Kerberos authentication.
To transfer all the data in the folder that you specified, DataSync must have\n permissions to read all the data. To ensure this, either configure the NFS export with\n no_root_squash
, or ensure that the files you want DataSync to\n access have permissions that allow read access for all users. Doing either option enables the\n agent to read the files. For the agent to access directories, you must additionally enable all\n execute access.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
\nFor information about NFS export configuration, see 18.7. The /etc/exports\n Configuration File in the Red Hat Enterprise Linux documentation.
" + "smithy.api#documentation": "The subdirectory in the NFS file system that is used to read data from the NFS source\n location or write data to the NFS destination. The NFS path should be a path that's\n exported by the NFS server, or a subdirectory of that path. The path should be such that it\n can be mounted by other NFS clients in your network.
\n \nTo see all the paths exported by your NFS server, run \"showmount -e\n nfs-server-name
\" from an NFS client that has access to your server. You can specify\n any directory that appears in the results, and any subdirectory of that directory. Ensure that\n the NFS export is accessible without Kerberos authentication.
To transfer all the data in the folder that you specified, DataSync must have\n permissions to read all the data. To ensure this, either configure the NFS export with\n no_root_squash
, or ensure that the files you want DataSync to\n access have permissions that allow read access for all users. Doing either option enables the\n agent to read the files. For the agent to access directories, you must additionally enable all\n execute access.
If you are copying data to or from your Snowcone device, see NFS Server on Snowcone for more information.
\n \nFor information about NFS export configuration, see 18.7. The /etc/exports\n Configuration File in the Red Hat Enterprise Linux documentation.
" } }, "OnPremConfig": { @@ -7265,7 +7214,7 @@ "Subdirectory": { "target": "com.amazonaws.datasync#SmbSubdirectory", "traits": { - "smithy.api#documentation": "The subdirectory in the SMB file system that is used to read data from the SMB source\n location or write data to the SMB destination. The SMB path should be a path that's\n exported by the SMB server, or a subdirectory of that path. The path should be such that it\n can be mounted by other SMB clients in your network.
\n\n Subdirectory
must be specified with forward slashes. For example,\n /path/to/folder
.
To transfer all the data in the folder that you specified, DataSync must have\n permissions to mount the SMB share and to access all the data in that share. To ensure this,\n do either of the following:
\nEnsure that the user/password specified belongs to the user who can mount the share\n and who has the appropriate permissions for all of the files and directories that you want\n DataSync to access.
\nUse credentials of a member of the Backup Operators group to mount the share.
\nDoing either of these options enables the agent to access the data. For the agent to\n access directories, you must also enable all execute access.
" + "smithy.api#documentation": "The subdirectory in the SMB file system that is used to read data from the SMB source\n location or write data to the SMB destination. The SMB path should be a path that's\n exported by the SMB server, or a subdirectory of that path. The path should be such that it\n can be mounted by other SMB clients in your network.
\n\n Subdirectory
must be specified with forward slashes. For example,\n /path/to/folder
.
To transfer all the data in the folder that you specified, DataSync must have\n permissions to mount the SMB share and to access all the data in that share. To ensure this,\n do either of the following:
\nEnsure that the user/password specified belongs to the user who can mount the share\n and who has the appropriate permissions for all of the files and directories that you want\n DataSync to access.
\nUse credentials of a member of the Backup Operators group to mount the share.
\nDoing either of these options enables the agent to access the data. For the agent to\n access directories, you must also enable all execute access.
" } }, "User": { @@ -7338,7 +7287,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates execution of a task.
\nYou can modify bandwidth throttling for a task execution that is running or queued.\n For more information, see Adjusting Bandwidth Throttling for a Task Execution.
\nThe only Option
that can be modified by UpdateTaskExecution
\n is \n BytesPerSecond\n
.
Updates execution of a task.
\nYou can modify bandwidth throttling for a task execution that is running or queued.\n For more information, see Adjusting Bandwidth Throttling for a Task Execution.
\n \nThe only Option
that can be modified by UpdateTaskExecution
\n is \n BytesPerSecond\n
.
Specifies a list of filter rules that exclude specific data during your transfer. For more information and examples, see Filtering data transferred by DataSync.
" + "smithy.api#documentation": "A list of filter rules that determines which files to exclude from a task. The list should\n contain a single filter string that consists of the patterns to exclude. The patterns are\n delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\"
.
\n
" } }, "Schedule": { @@ -7403,7 +7352,7 @@ "Includes": { "target": "com.amazonaws.datasync#FilterList", "traits": { - "smithy.api#documentation": "Specifies a list of filter rules that include specific data during your transfer. For more information and examples, see Filtering data transferred by DataSync.
" + "smithy.api#documentation": "A list of filter rules that determines which files to include when running a task. The\n pattern contains a single filter string that consists of the patterns to include. The patterns\n are delimited by \"|\" (that is, a pipe), for example, \"/folder1|/folder2\"
.
Removes your Amazon Web Services account from the launch permissions for the specified AMI. For more\n information, see Cancel having an AMI shared with your Amazon Web Services account \n in the Amazon EC2 User Guide.
" + "smithy.api#documentation": "Removes your Amazon Web Services account from the launch permissions for the specified AMI. For more\n information, see Cancel having an AMI shared with your Amazon Web Services account \n in the Amazon Elastic Compute Cloud User Guide.
" } }, "com.amazonaws.ec2#CancelImageLaunchPermissionRequest": { @@ -13184,7 +13184,7 @@ "target": "com.amazonaws.ec2#CopyImageResult" }, "traits": { - "smithy.api#documentation": "Initiates the copy of an AMI. You can copy an AMI from one Region to another, or from a\n Region to an Outpost. You can't copy an AMI from an Outpost to a Region, from one Outpost\n to another, or within the same Outpost. To copy an AMI to another partition, see CreateStoreImageTask.
\nTo copy an AMI from one Region to another, specify the source Region using the \n \t\tSourceRegion parameter, and specify the \n \t\tdestination Region using its endpoint. Copies of encrypted backing snapshots for\n \t\tthe AMI are encrypted. Copies of unencrypted backing snapshots remain unencrypted, \n \t\tunless you set Encrypted
during the copy operation. You cannot \n \t\tcreate an unencrypted copy of an encrypted backing snapshot.
To copy an AMI from a Region to an Outpost, specify the source Region using the \n \t\tSourceRegion parameter, and specify the \n \t\tARN of the destination Outpost using DestinationOutpostArn. \n \t\tBacking snapshots copied to an Outpost are encrypted by default using the default\n \t\tencryption key for the Region, or a different key that you specify in the request using \n \t\tKmsKeyId. Outposts do not support unencrypted \n \t\tsnapshots. For more information, \n \t\t\tAmazon EBS local snapshots on Outposts in the Amazon EC2 User Guide.
\nFor more information about the prerequisites and limits when copying an AMI, see Copy an AMI in the\n Amazon EC2 User Guide.
" + "smithy.api#documentation": "Initiates the copy of an AMI. You can copy an AMI from one Region to another, or from a\n Region to an Outpost. You can't copy an AMI from an Outpost to a Region, from one Outpost\n to another, or within the same Outpost. To copy an AMI to another partition, see CreateStoreImageTask.
\nTo copy an AMI from one Region to another, specify the source Region using the \n \t\tSourceRegion parameter, and specify the \n \t\tdestination Region using its endpoint. Copies of encrypted backing snapshots for\n \t\tthe AMI are encrypted. Copies of unencrypted backing snapshots remain unencrypted, \n \t\tunless you set Encrypted
during the copy operation. You cannot \n \t\tcreate an unencrypted copy of an encrypted backing snapshot.
To copy an AMI from a Region to an Outpost, specify the source Region using the \n \t\tSourceRegion parameter, and specify the \n \t\tARN of the destination Outpost using DestinationOutpostArn. \n \t\tBacking snapshots copied to an Outpost are encrypted by default using the default\n \t\tencryption key for the Region, or a different key that you specify in the request using \n \t\tKmsKeyId. Outposts do not support unencrypted \n \t\tsnapshots. For more information, \n \t\t\tAmazon EBS local snapshots on Outposts in the Amazon Elastic Compute Cloud User Guide.
\nFor more information about the prerequisites and limits when copying an AMI, see Copy an AMI in the\n Amazon Elastic Compute Cloud User Guide.
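A hedged sketch of the CopyImage call described above, copying an AMI from another Region into the caller's Region with Encrypted set so that unencrypted backing snapshots come out encrypted; the class and member names are assumed from the usual codegen of this model, and the literal values are placeholders:

    import aws.sdk.kotlin.services.ec2.Ec2Client
    import aws.sdk.kotlin.services.ec2.model.CopyImageRequest

    suspend fun copyAmiIntoCurrentRegion(ec2: Ec2Client, sourceImageId: String): String? {
        val response = ec2.copyImage(CopyImageRequest {
            name = "example-ami-copy"          // hypothetical name for the new AMI
            this.sourceImageId = sourceImageId
            sourceRegion = "us-west-2"         // Region that holds the source AMI
            encrypted = true                   // unencrypted backing snapshots are copied as encrypted
        })
        return response.imageId
    }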
" } }, "com.amazonaws.ec2#CopyImageRequest": { @@ -13208,7 +13208,7 @@ "aws.protocols#ec2QueryName": "Encrypted", "smithy.api#clientOptional": {}, "smithy.api#default": false, - "smithy.api#documentation": "Specifies whether the destination snapshots of the copied image should be encrypted. You\n can encrypt a copy of an unencrypted snapshot, but you cannot create an unencrypted copy of an\n encrypted snapshot. The default KMS key for Amazon EBS is used unless you specify a non-default\n Key Management Service (KMS) KMS key using KmsKeyId
. For more information, see Amazon EBS encryption in the\n Amazon EC2 User Guide.
Specifies whether the destination snapshots of the copied image should be encrypted. You\n can encrypt a copy of an unencrypted snapshot, but you cannot create an unencrypted copy of an\n encrypted snapshot. The default KMS key for Amazon EBS is used unless you specify a non-default\n Key Management Service (KMS) KMS key using KmsKeyId
. For more information, see Amazon EBS encryption in the\n Amazon Elastic Compute Cloud User Guide.
The Amazon Resource Name (ARN) of the Outpost to which to copy the AMI. Only \n \t\tspecify this parameter when copying an AMI from an Amazon Web Services Region to an Outpost. \n \t\tThe AMI must be in the Region of the destination Outpost. You cannot copy an \n \t\tAMI from an Outpost to a Region, from one Outpost to another, or within the same \n \t\tOutpost.
\nFor more information, see Copy AMIs from an Amazon Web Services\n Region to an Outpost in the Amazon EC2 User Guide.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Outpost to which to copy the AMI. Only \n \t\tspecify this parameter when copying an AMI from an Amazon Web Services Region to an Outpost. \n \t\tThe AMI must be in the Region of the destination Outpost. You cannot copy an \n \t\tAMI from an Outpost to a Region, from one Outpost to another, or within the same \n \t\tOutpost.
\nFor more information, see Copy AMIs from an Amazon Web Services\n Region to an Outpost in the Amazon Elastic Compute Cloud User Guide.
" } }, "DryRun": { @@ -17084,7 +17084,7 @@ "target": "com.amazonaws.ec2#CreateRestoreImageTaskResult" }, "traits": { - "smithy.api#documentation": "Starts a task that restores an AMI from an Amazon S3 object that was previously created by using\n CreateStoreImageTask.
\nTo use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the\n Amazon EC2 User Guide.
\nFor more information, see Store and restore an AMI using\n \tAmazon S3 in the Amazon EC2 User Guide.
" + "smithy.api#documentation": "Starts a task that restores an AMI from an Amazon S3 object that was previously created by using\n CreateStoreImageTask.
\nTo use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the\n Amazon Elastic Compute Cloud User Guide.
\nFor more information, see Store and restore an AMI using\n \tAmazon S3 in the Amazon Elastic Compute Cloud User Guide.
" } }, "com.amazonaws.ec2#CreateRestoreImageTaskRequest": { @@ -17627,7 +17627,7 @@ "target": "com.amazonaws.ec2#CreateStoreImageTaskResult" }, "traits": { - "smithy.api#documentation": "Stores an AMI as a single object in an Amazon S3 bucket.
\nTo use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the\n Amazon EC2 User Guide.
\nFor more information, see Store and restore an AMI using\n \tAmazon S3 in the Amazon EC2 User Guide.
" + "smithy.api#documentation": "Stores an AMI as a single object in an Amazon S3 bucket.
\nTo use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the\n Amazon Elastic Compute Cloud User Guide.
\nFor more information, see Store and restore an AMI using\n \tAmazon S3 in the Amazon Elastic Compute Cloud User Guide.
" } }, "com.amazonaws.ec2#CreateStoreImageTaskRequest": { @@ -24635,7 +24635,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "Deregisters the specified AMI. After you deregister an AMI, it can't be used to \n launch new instances.
\nIf you deregister an AMI that matches a Recycle Bin retention rule, the AMI is retained\n in the Recycle Bin for the specified retention period. For more information, see Recycle Bin in\n the Amazon EC2 User Guide.
\nWhen you deregister an AMI, it doesn't affect any instances that you've already \n launched from the AMI. You'll continue to incur usage costs for those instances until \n you terminate them.
\nWhen you deregister an Amazon EBS-backed AMI, it doesn't affect the snapshot that was\n\t\t\tcreated for the root volume of the instance during the AMI creation process. When you\n\t\t\tderegister an instance store-backed AMI, it doesn't affect the files that you uploaded\n\t\t\tto Amazon S3 when you created the AMI.
" + "smithy.api#documentation": "Deregisters the specified AMI. After you deregister an AMI, it can't be used to \n launch new instances.
\nIf you deregister an AMI that matches a Recycle Bin retention rule, the AMI is retained\n in the Recycle Bin for the specified retention period. For more information, see Recycle Bin in\n the Amazon Elastic Compute Cloud User Guide.
\nWhen you deregister an AMI, it doesn't affect any instances that you've already \n launched from the AMI. You'll continue to incur usage costs for those instances until \n you terminate them.
\nWhen you deregister an Amazon EBS-backed AMI, it doesn't affect the snapshot that was\n\t\t\tcreated for the root volume of the instance during the AMI creation process. When you\n\t\t\tderegister an instance store-backed AMI, it doesn't affect the files that you uploaded\n\t\t\tto Amazon S3 when you created the AMI.
" } }, "com.amazonaws.ec2#DeregisterImageRequest": { @@ -28456,12 +28456,6 @@ }, "traits": { "smithy.api#documentation": "Describes the specified images (AMIs, AKIs, and ARIs) available to you or all of the images available to you.
\nThe images available to you include public images, private images that you own, and private images owned by other \n Amazon Web Services accounts for which you have explicit launch permissions.
\nRecently deregistered images appear in the returned results for a short interval and then\n return empty results. After all instances that reference a deregistered AMI are terminated,\n specifying the ID of the image will eventually return an error indicating that the AMI ID\n cannot be found.
", - "smithy.api#paginated": { - "inputToken": "NextToken", - "outputToken": "NextToken", - "items": "Images", - "pageSize": "MaxResults" - }, "smithy.api#suppress": [ "WaitableTraitInvalidErrorType" ], @@ -28563,20 +28557,6 @@ "smithy.api#documentation": "Checks whether you have the required permissions for the action, without actually making the request, \n\t\t\tand provides an error response. If you have the required permissions, the error response is \n\t\t\tDryRunOperation
. Otherwise, it is UnauthorizedOperation
.
The maximum number of results to return with a single call.\n\tTo retrieve the remaining results, make another call with the returned nextToken
value.
The token for the next page of results.
" - } } } }, @@ -28590,14 +28570,6 @@ "smithy.api#documentation": "Information about the images.
", "smithy.api#xmlName": "imagesSet" } - }, - "NextToken": { - "target": "com.amazonaws.ec2#String", - "traits": { - "aws.protocols#ec2QueryName": "NextToken", - "smithy.api#documentation": "The token to use to retrieve the next page of results. This value is null
when there are no more results to return.
Describes the progress of the AMI store tasks. You can describe the store tasks for\n specified AMIs. If you don't specify the AMIs, you get a paginated list of store tasks from\n the last 31 days.
\nFor each AMI task, the response indicates if the task is InProgress
,\n Completed
, or Failed
. For tasks InProgress
, the\n response shows the estimated progress as a percentage.
Tasks are listed in reverse chronological order. Currently, only tasks from the past 31\n days can be viewed.
\nTo use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the\n Amazon EC2 User Guide.
\nFor more information, see Store and restore an AMI using\n \tAmazon S3 in the Amazon EC2 User Guide.
", + "smithy.api#documentation": "Describes the progress of the AMI store tasks. You can describe the store tasks for\n specified AMIs. If you don't specify the AMIs, you get a paginated list of store tasks from\n the last 31 days.
\nFor each AMI task, the response indicates if the task is InProgress
,\n Completed
, or Failed
. For tasks InProgress
, the\n response shows the estimated progress as a percentage.
Tasks are listed in reverse chronological order. Currently, only tasks from the past 31\n days can be viewed.
\nTo use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the\n Amazon Elastic Compute Cloud User Guide.
\nFor more information, see Store and restore an AMI using\n \tAmazon S3 in the Amazon Elastic Compute Cloud User Guide.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -38015,13 +37987,13 @@ } }, "InstanceId": { - "target": "com.amazonaws.ec2#InstanceIdForResolver", + "target": "com.amazonaws.ec2#InstanceId", "traits": { "smithy.api#documentation": "The ID of the instance. If you are detaching a Multi-Attach enabled volume, you must specify an instance ID.
" } }, "VolumeId": { - "target": "com.amazonaws.ec2#VolumeIdWithResolver", + "target": "com.amazonaws.ec2#VolumeId", "traits": { "smithy.api#clientOptional": {}, "smithy.api#documentation": "The ID of the volume.
", @@ -38807,7 +38779,7 @@ "target": "com.amazonaws.ec2#DisableImageDeprecationResult" }, "traits": { - "smithy.api#documentation": "Cancels the deprecation of the specified AMI.
\nFor more information, see Deprecate an AMI in the\n Amazon EC2 User Guide.
" + "smithy.api#documentation": "Cancels the deprecation of the specified AMI.
\nFor more information, see Deprecate an AMI in the\n Amazon Elastic Compute Cloud User Guide.
" } }, "com.amazonaws.ec2#DisableImageDeprecationRequest": { @@ -41517,7 +41489,7 @@ "target": "com.amazonaws.ec2#EnableImageDeprecationResult" }, "traits": { - "smithy.api#documentation": "Enables deprecation of the specified AMI at the specified date and time.
\nFor more information, see Deprecate an AMI in the Amazon EC2 User Guide.
" + "smithy.api#documentation": "Enables deprecation of the specified AMI at the specified date and time.
\nFor more information, see Deprecate an AMI in the Amazon Elastic Compute Cloud User Guide.
" } }, "com.amazonaws.ec2#EnableImageDeprecationRequest": { @@ -49384,7 +49356,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "PlatformDetails", - "smithy.api#documentation": "The platform details associated with the billing code of the AMI. For more information,\n see Understand\n AMI billing information in the Amazon EC2 User Guide.
", + "smithy.api#documentation": "The platform details associated with the billing code of the AMI. For more information,\n see Understand\n AMI billing information in the Amazon Elastic Compute Cloud User Guide.
", "smithy.api#xmlName": "platformDetails" } }, @@ -49522,7 +49494,7 @@ "target": "com.amazonaws.ec2#BootModeValues", "traits": { "aws.protocols#ec2QueryName": "BootMode", - "smithy.api#documentation": "The boot mode of the image. For more information, see Boot modes in the\n Amazon EC2 User Guide.
", + "smithy.api#documentation": "The boot mode of the image. For more information, see Boot modes in the\n Amazon Elastic Compute Cloud User Guide.
", "smithy.api#xmlName": "bootMode" } }, @@ -49530,7 +49502,7 @@ "target": "com.amazonaws.ec2#TpmSupportValues", "traits": { "aws.protocols#ec2QueryName": "TpmSupport", - "smithy.api#documentation": "If the image is configured for NitroTPM support, the value is v2.0
. \n For more information, see NitroTPM in the\n Amazon EC2 User Guide.
If the image is configured for NitroTPM support, the value is v2.0
. \n For more information, see NitroTPM in the\n Amazon Elastic Compute Cloud User Guide.
If v2.0
, it indicates that IMDSv2 is specified in the AMI. Instances launched\n from this AMI will have HttpTokens
automatically set to required
so\n that, by default, the instance requires that IMDSv2 is used when requesting instance metadata.\n In addition, HttpPutResponseHopLimit
is set to 2
. For more\n information, see Configure\n the AMI in the Amazon EC2 User Guide.
If v2.0
, it indicates that IMDSv2 is specified in the AMI. Instances launched\n from this AMI will have HttpTokens
automatically set to required
so\n that, by default, the instance requires that IMDSv2 is used when requesting instance metadata.\n In addition, HttpPutResponseHopLimit
is set to 2
. For more\n information, see Configure\n the AMI in the Amazon Elastic Compute Cloud User Guide.
Base64 representation of the non-volatile UEFI variable store. To retrieve the UEFI data,\n use the GetInstanceUefiData command. You can inspect and modify the UEFI data by using the\n python-uefivars tool on\n GitHub. For more information, see UEFI Secure Boot in the\n Amazon EC2 User Guide.
", + "smithy.api#documentation": "Base64 representation of the non-volatile UEFI variable store. To retrieve the UEFI data,\n use the GetInstanceUefiData command. You can inspect and modify the UEFI data by using the\n python-uefivars tool on\n GitHub. For more information, see UEFI Secure Boot in the\n Amazon Elastic Compute Cloud User Guide.
", "smithy.api#xmlName": "uefiData" } }, @@ -49658,7 +49630,7 @@ "target": "com.amazonaws.ec2#AttributeValue", "traits": { "aws.protocols#ec2QueryName": "ImdsSupport", - "smithy.api#documentation": "If v2.0
, it indicates that IMDSv2 is specified in the AMI. Instances launched\n from this AMI will have HttpTokens
automatically set to required
so\n that, by default, the instance requires that IMDSv2 is used when requesting instance metadata.\n In addition, HttpPutResponseHopLimit
is set to 2
. For more\n information, see Configure\n the AMI in the Amazon EC2 User Guide.
If v2.0
, it indicates that IMDSv2 is specified in the AMI. Instances launched\n from this AMI will have HttpTokens
automatically set to required
so\n that, by default, the instance requires that IMDSv2 is used when requesting instance metadata.\n In addition, HttpPutResponseHopLimit
is set to 2
. For more\n information, see Configure\n the AMI in the Amazon Elastic Compute Cloud User Guide.
Lists one or more AMIs that are currently in the Recycle Bin. For more information, \n see Recycle\n Bin in the Amazon EC2 User Guide.
", + "smithy.api#documentation": "Lists one or more AMIs that are currently in the Recycle Bin. For more information, \n see Recycle\n Bin in the Amazon Elastic Compute Cloud User Guide.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -74949,7 +74918,7 @@ "target": "com.amazonaws.ec2#RegisterImageResult" }, "traits": { - "smithy.api#documentation": "Registers an AMI. When you're creating an AMI, this is the final step you must complete\n before you can launch an instance from the AMI. For more information about creating AMIs, see\n Create your\n own AMI in the Amazon Elastic Compute Cloud User Guide.
\nFor Amazon EBS-backed instances, CreateImage creates and registers the AMI\n in a single request, so you don't have to register the AMI yourself. We recommend that you\n always use CreateImage unless you have a specific reason to use\n RegisterImage.
\nIf needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. \n If you make changes to an image, deregister the previous image and register the new image.
\n\n Register a snapshot of a root device volume\n
\nYou can use RegisterImage
to create an Amazon EBS-backed Linux AMI from\n a snapshot of a root device volume. You specify the snapshot using a block device mapping.\n You can't set the encryption state of the volume using the block device mapping. If the \n snapshot is encrypted, or encryption by default is enabled, the root volume of an instance \n launched from the AMI is encrypted.
For more information, see Create a Linux AMI from a snapshot and Use encryption with Amazon EBS-backed AMIs\n in the Amazon Elastic Compute Cloud User Guide.
\n\n Amazon Web Services Marketplace product codes\n
\nIf any snapshots have Amazon Web Services Marketplace product codes, they are copied to the new\n AMI.
\nWindows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE\n Linux Enterprise Server (SLES), use the Amazon EC2 billing product code associated with an AMI to\n verify the subscription status for package updates. To create a new AMI for operating systems\n that require a billing product code, instead of registering the AMI, do the following to\n preserve the billing product code association:
\nLaunch an instance from an existing AMI with that billing product code.
\nCustomize the instance.
\nCreate an AMI from the instance using CreateImage.
\nIf you purchase a Reserved Instance to apply to an On-Demand Instance that was launched\n from an AMI with a billing product code, make sure that the Reserved Instance has the matching\n billing product code. If you purchase a Reserved Instance without the matching billing product\n code, the Reserved Instance will not be applied to the On-Demand Instance. For information\n about how to obtain the platform details and billing information of an AMI, see Understand AMI\n billing information in the Amazon EC2 User Guide.
" + "smithy.api#documentation": "Registers an AMI. When you're creating an AMI, this is the final step you must complete\n before you can launch an instance from the AMI. For more information about creating AMIs, see\n Create your\n own AMI in the Amazon Elastic Compute Cloud User Guide.
\nFor Amazon EBS-backed instances, CreateImage creates and registers the AMI\n in a single request, so you don't have to register the AMI yourself. We recommend that you\n always use CreateImage unless you have a specific reason to use\n RegisterImage.
\nIf needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. \n If you make changes to an image, deregister the previous image and register the new image.
\n\n Register a snapshot of a root device volume\n
\nYou can use RegisterImage
to create an Amazon EBS-backed Linux AMI from\n a snapshot of a root device volume. You specify the snapshot using a block device mapping.\n You can't set the encryption state of the volume using the block device mapping. If the \n snapshot is encrypted, or encryption by default is enabled, the root volume of an instance \n launched from the AMI is encrypted.
For more information, see Create a Linux AMI from a snapshot and Use encryption with Amazon EBS-backed AMIs\n in the Amazon Elastic Compute Cloud User Guide.
\n\n Amazon Web Services Marketplace product codes\n
\nIf any snapshots have Amazon Web Services Marketplace product codes, they are copied to the new\n AMI.
\nWindows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE\n Linux Enterprise Server (SLES), use the Amazon EC2 billing product code associated with an AMI to\n verify the subscription status for package updates. To create a new AMI for operating systems\n that require a billing product code, instead of registering the AMI, do the following to\n preserve the billing product code association:
\nLaunch an instance from an existing AMI with that billing product code.
\nCustomize the instance.
\nCreate an AMI from the instance using CreateImage.
\nIf you purchase a Reserved Instance to apply to an On-Demand Instance that was launched\n from an AMI with a billing product code, make sure that the Reserved Instance has the matching\n billing product code. If you purchase a Reserved Instance without the matching billing product\n code, the Reserved Instance will not be applied to the On-Demand Instance. For information\n about how to obtain the platform details and billing information of an AMI, see Understand AMI\n billing information in the Amazon Elastic Compute Cloud User Guide.
" } }, "com.amazonaws.ec2#RegisterImageRequest": { @@ -74972,7 +74941,7 @@ "BlockDeviceMappings": { "target": "com.amazonaws.ec2#BlockDeviceMappingRequestList", "traits": { - "smithy.api#documentation": "The block device mapping entries.
\nIf you specify an Amazon EBS volume using the ID of an Amazon EBS snapshot, you can't specify the encryption state of the volume.
\nIf you create an AMI on an Outpost, then all backing snapshots must be on the same\n Outpost or in the Region of that Outpost. AMIs on an Outpost that include local snapshots can\n be used to launch instances on the same Outpost only. For more information, Amazon EBS local\n snapshots on Outposts in the Amazon EC2 User Guide.
", + "smithy.api#documentation": "The block device mapping entries.
\nIf you specify an Amazon EBS volume using the ID of an Amazon EBS snapshot, you can't specify the encryption state of the volume.
\nIf you create an AMI on an Outpost, then all backing snapshots must be on the same Outpost or in the Region \n \t of that Outpost. AMIs on an Outpost that include local snapshots can be used to launch instances on the same Outpost \n \t only. For more information, \n \t \tAmazon EBS local snapshots on Outposts in the Amazon Elastic Compute Cloud User Guide.
", "smithy.api#xmlName": "BlockDeviceMapping" } }, @@ -75025,7 +74994,7 @@ "BillingProducts": { "target": "com.amazonaws.ec2#BillingProductList", "traits": { - "smithy.api#documentation": "The billing product codes. Your account must be authorized to specify billing product codes.
\nIf your account is not authorized to specify billing product codes, you can publish AMIs\n that include billable software and list them on the Amazon Web Services Marketplace. You must first register as a seller\n on the Amazon Web Services Marketplace. For more information, see Getting started as a\n seller and AMI-based\n products in the Amazon Web Services Marketplace Seller Guide.
", + "smithy.api#documentation": "The billing product codes. Your account must be authorized to specify billing product codes. Otherwise,\n \tyou can use the Amazon Web Services Marketplace to bill for the use of an AMI.
", "smithy.api#xmlName": "BillingProduct" } }, @@ -75064,25 +75033,25 @@ "BootMode": { "target": "com.amazonaws.ec2#BootModeValues", "traits": { - "smithy.api#documentation": "The boot mode of the AMI. For more information, see Boot modes in the\n Amazon EC2 User Guide.
" + "smithy.api#documentation": "The boot mode of the AMI. For more information, see Boot modes in the\n Amazon Elastic Compute Cloud User Guide.
" } }, "TpmSupport": { "target": "com.amazonaws.ec2#TpmSupportValues", "traits": { - "smithy.api#documentation": "Set to v2.0
to enable Trusted Platform Module (TPM) support. For more\n information, see NitroTPM in the Amazon EC2 User Guide.
Set to v2.0
to enable Trusted Platform Module (TPM) support. For more\n information, see NitroTPM in the Amazon Elastic Compute Cloud User Guide.
Base64 representation of the non-volatile UEFI variable store. To retrieve the UEFI data,\n use the GetInstanceUefiData command. You can inspect and modify the UEFI data by using the\n python-uefivars tool on\n GitHub. For more information, see UEFI Secure Boot in the\n Amazon EC2 User Guide.
" + "smithy.api#documentation": "Base64 representation of the non-volatile UEFI variable store. To retrieve the UEFI data,\n use the GetInstanceUefiData command. You can inspect and modify the UEFI data by using the\n python-uefivars tool on\n GitHub. For more information, see UEFI Secure Boot in the\n Amazon Elastic Compute Cloud User Guide.
" } }, "ImdsSupport": { "target": "com.amazonaws.ec2#ImdsSupportValues", "traits": { - "smithy.api#documentation": "Set to v2.0
to indicate that IMDSv2 is specified in the AMI. Instances\n launched from this AMI will have HttpTokens
automatically set to\n required
so that, by default, the instance requires that IMDSv2 is used when\n requesting instance metadata. In addition, HttpPutResponseHopLimit
is set to\n 2
. For more information, see Configure\n the AMI in the Amazon EC2 User Guide.
If you set the value to v2.0
, make sure that your AMI software can support IMDSv2.
Set to v2.0
to indicate that IMDSv2 is specified in the AMI. Instances\n launched from this AMI will have HttpTokens
automatically set to\n required
so that, by default, the instance requires that IMDSv2 is used when\n requesting instance metadata. In addition, HttpPutResponseHopLimit
is set to\n 2
. For more information, see Configure\n the AMI in the Amazon Elastic Compute Cloud User Guide.
If you set the value to v2.0
, make sure that your AMI software can support IMDSv2.
Restores an AMI from the Recycle Bin. For more information, see Recycle Bin in the Amazon EC2 User Guide.
" + "smithy.api#documentation": "Restores an AMI from the Recycle Bin. For more information, see Recycle Bin in the Amazon Elastic Compute Cloud User Guide.
" } }, "com.amazonaws.ec2#RestoreImageFromRecycleBinRequest": { @@ -93345,9 +93314,6 @@ } } }, - "com.amazonaws.ec2#VolumeIdWithResolver": { - "type": "string" - }, "com.amazonaws.ec2#VolumeList": { "type": "list", "member": { diff --git a/codegen/sdk/aws-models/ecs.json b/codegen/sdk/aws-models/ecs.json index 84a400d2e78..1a7aadcfce9 100644 --- a/codegen/sdk/aws-models/ecs.json +++ b/codegen/sdk/aws-models/ecs.json @@ -264,7 +264,7 @@ "name": "ecs" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service. It makes\n\t\t\tit easy to run, stop, and manage Docker containers. You can host your cluster on a\n\t\t\tserverless infrastructure that's managed by Amazon ECS by launching your services or tasks on\n\t\t\tFargate. For more control, you can host your tasks on a cluster of Amazon Elastic Compute Cloud (Amazon EC2)\n\t\t\tor External (on-premises) instances that you manage.
\nAmazon ECS makes it easy to launch and stop container-based applications with simple API\n\t\t\tcalls. This makes it easy to get the state of your cluster from a centralized service,\n\t\t\tand gives you access to many familiar Amazon EC2 features.
\nYou can use Amazon ECS to schedule the placement of containers across your cluster based on\n\t\t\tyour resource needs, isolation policies, and availability requirements. With Amazon ECS, you\n\t\t\tdon't need to operate your own cluster management and configuration management systems.\n\t\t\tYou also don't need to worry about scaling your management infrastructure.
", + "smithy.api#documentation": "Amazon Elastic Container Service (Amazon ECS) is a highly scalable, fast, container management service. It makes\n\t\t\tit easy to run, stop, and manage Docker containers. You can host your cluster on a\n\t\t\tserverless infrastructure that's managed by Amazon ECS by launching your services or tasks on\n\t\t\tFargate. For more control, you can host your tasks on a cluster of Amazon Elastic Compute Cloud (Amazon EC2)\n\t\t\tor External (on-premises) instances that you manage.
\n\t\tAmazon ECS makes it easy to launch and stop container-based applications with simple API\n\t\t\tcalls. This makes it easy to get the state of your cluster from a centralized service,\n\t\t\tand gives you access to many familiar Amazon EC2 features.
\n\t\tYou can use Amazon ECS to schedule the placement of containers across your cluster based on\n\t\t\tyour resource needs, isolation policies, and availability requirements. With Amazon ECS, you\n\t\t\tdon't need to operate your own cluster management and configuration management systems.\n\t\t\tYou also don't need to worry about scaling your management infrastructure.
", "smithy.api#title": "Amazon EC2 Container Service", "smithy.api#xmlNamespace": { "uri": "http://ecs.amazonaws.com/doc/2014-11-13/" @@ -323,6 +323,15 @@ "ref": "Endpoint" } ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" } ], "type": "tree", @@ -585,9 +594,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "ap-south-2", - "UseDualStack": true + "Region": "ap-south-2" } }, { @@ -598,9 +607,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "ap-south-2", - "UseDualStack": false + "Region": "ap-south-2" } }, { @@ -611,9 +620,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "ap-south-2", - "UseDualStack": true + "Region": "ap-south-2" } }, { @@ -624,9 +633,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "ap-south-2", - "UseDualStack": false + "Region": "ap-south-2" } }, { @@ -637,9 +646,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": true + "Region": "ap-south-1" } }, { @@ -650,9 +659,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": false + "Region": "ap-south-1" } }, { @@ -663,9 +672,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": true + "Region": "ap-south-1" } }, { @@ -676,9 +685,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": false + "Region": "ap-south-1" } }, { @@ -689,9 +698,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": true + "Region": "eu-south-1" } }, { @@ -702,9 +711,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": false + "Region": "eu-south-1" } }, { @@ -715,9 +724,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": true + "Region": "eu-south-1" } }, { @@ -728,9 +737,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": false + "Region": "eu-south-1" } }, { @@ -741,9 +750,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "eu-south-2", - "UseDualStack": true + "Region": "eu-south-2" } }, { @@ -754,9 +763,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "eu-south-2", - "UseDualStack": false + "Region": "eu-south-2" } }, { @@ -767,9 +776,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "eu-south-2", - "UseDualStack": true + "Region": "eu-south-2" } }, { @@ -780,9 +789,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "eu-south-2", - "UseDualStack": false + "Region": "eu-south-2" } }, { @@ -793,9 +802,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "us-gov-east-1", - "UseDualStack": true + "Region": "us-gov-east-1" } }, { @@ -806,9 +815,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "us-gov-east-1", - "UseDualStack": false + "Region": "us-gov-east-1" } }, { @@ -819,9 +828,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": true + "Region": "us-gov-east-1" } }, { @@ -832,9 +841,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": false + "Region": "us-gov-east-1" } }, { @@ -845,9 +854,9 @@ } }, 
"params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "me-central-1", - "UseDualStack": true + "Region": "me-central-1" } }, { @@ -858,9 +867,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "me-central-1", - "UseDualStack": false + "Region": "me-central-1" } }, { @@ -871,9 +880,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": true + "Region": "me-central-1" } }, { @@ -884,9 +893,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": false + "Region": "me-central-1" } }, { @@ -897,9 +906,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": true + "Region": "ca-central-1" } }, { @@ -910,9 +919,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": false + "Region": "ca-central-1" } }, { @@ -923,9 +932,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": true + "Region": "ca-central-1" } }, { @@ -936,9 +945,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": false + "Region": "ca-central-1" } }, { @@ -949,9 +958,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": true + "Region": "eu-central-1" } }, { @@ -962,9 +971,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": false + "Region": "eu-central-1" } }, { @@ -975,9 +984,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": true + "Region": "eu-central-1" } }, { @@ -988,9 +997,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": false + "Region": "eu-central-1" } }, { @@ -999,9 +1008,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "us-iso-west-1", - "UseDualStack": true + "Region": "us-iso-west-1" } }, { @@ -1012,9 +1021,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "us-iso-west-1", - "UseDualStack": false + "Region": "us-iso-west-1" } }, { @@ -1023,9 +1032,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "us-iso-west-1", - "UseDualStack": true + "Region": "us-iso-west-1" } }, { @@ -1036,9 +1045,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "us-iso-west-1", - "UseDualStack": false + "Region": "us-iso-west-1" } }, { @@ -1049,9 +1058,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "eu-central-2", - "UseDualStack": true + "Region": "eu-central-2" } }, { @@ -1062,9 +1071,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "eu-central-2", - "UseDualStack": false + "Region": "eu-central-2" } }, { @@ -1075,9 +1084,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "eu-central-2", - "UseDualStack": true + "Region": "eu-central-2" } }, { @@ -1088,9 +1097,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "eu-central-2", - "UseDualStack": false + "Region": "eu-central-2" } }, { @@ -1101,9 +1110,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": 
"us-west-1", - "UseDualStack": true + "Region": "us-west-1" } }, { @@ -1114,9 +1123,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": false + "Region": "us-west-1" } }, { @@ -1127,9 +1136,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": true + "Region": "us-west-1" } }, { @@ -1140,9 +1149,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": false + "Region": "us-west-1" } }, { @@ -1153,9 +1162,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": true + "Region": "us-west-2" } }, { @@ -1166,9 +1175,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": false + "Region": "us-west-2" } }, { @@ -1179,9 +1188,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": true + "Region": "us-west-2" } }, { @@ -1192,9 +1201,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": false + "Region": "us-west-2" } }, { @@ -1205,9 +1214,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": true + "Region": "af-south-1" } }, { @@ -1218,9 +1227,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": false + "Region": "af-south-1" } }, { @@ -1231,9 +1240,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": true + "Region": "af-south-1" } }, { @@ -1244,9 +1253,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": false + "Region": "af-south-1" } }, { @@ -1257,9 +1266,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": true + "Region": "eu-north-1" } }, { @@ -1270,9 +1279,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": false + "Region": "eu-north-1" } }, { @@ -1283,9 +1292,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": true + "Region": "eu-north-1" } }, { @@ -1296,9 +1305,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": false + "Region": "eu-north-1" } }, { @@ -1309,9 +1318,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": true + "Region": "eu-west-3" } }, { @@ -1322,9 +1331,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": false + "Region": "eu-west-3" } }, { @@ -1335,9 +1344,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": true + "Region": "eu-west-3" } }, { @@ -1348,9 +1357,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": false + "Region": "eu-west-3" } }, { @@ -1361,9 +1370,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": true + "Region": "eu-west-2" } }, { @@ -1374,9 +1383,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": false + "Region": "eu-west-2" } }, { @@ -1387,9 +1396,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": 
"eu-west-2", - "UseDualStack": true + "Region": "eu-west-2" } }, { @@ -1400,9 +1409,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": false + "Region": "eu-west-2" } }, { @@ -1413,9 +1422,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": true + "Region": "eu-west-1" } }, { @@ -1426,9 +1435,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": false + "Region": "eu-west-1" } }, { @@ -1439,9 +1448,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": true + "Region": "eu-west-1" } }, { @@ -1452,9 +1461,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": false + "Region": "eu-west-1" } }, { @@ -1465,9 +1474,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": true + "Region": "ap-northeast-3" } }, { @@ -1478,9 +1487,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": false + "Region": "ap-northeast-3" } }, { @@ -1491,9 +1500,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": true + "Region": "ap-northeast-3" } }, { @@ -1504,9 +1513,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": false + "Region": "ap-northeast-3" } }, { @@ -1517,9 +1526,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": true + "Region": "ap-northeast-2" } }, { @@ -1530,9 +1539,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": false + "Region": "ap-northeast-2" } }, { @@ -1543,9 +1552,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": true + "Region": "ap-northeast-2" } }, { @@ -1556,9 +1565,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": false + "Region": "ap-northeast-2" } }, { @@ -1569,9 +1578,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": true + "Region": "ap-northeast-1" } }, { @@ -1582,9 +1591,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": false + "Region": "ap-northeast-1" } }, { @@ -1595,9 +1604,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": true + "Region": "ap-northeast-1" } }, { @@ -1608,9 +1617,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": false + "Region": "ap-northeast-1" } }, { @@ -1621,9 +1630,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": true + "Region": "me-south-1" } }, { @@ -1634,9 +1643,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": false + "Region": "me-south-1" } }, { @@ -1647,9 +1656,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": true + "Region": "me-south-1" } }, { @@ -1660,9 +1669,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": false + "Region": 
"me-south-1" } }, { @@ -1673,9 +1682,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "sa-east-1", - "UseDualStack": true + "Region": "sa-east-1" } }, { @@ -1686,9 +1695,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "sa-east-1", - "UseDualStack": false + "Region": "sa-east-1" } }, { @@ -1699,9 +1708,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": true + "Region": "sa-east-1" } }, { @@ -1712,9 +1721,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": false + "Region": "sa-east-1" } }, { @@ -1725,9 +1734,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "ap-east-1", - "UseDualStack": true + "Region": "ap-east-1" } }, { @@ -1738,9 +1747,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "ap-east-1", - "UseDualStack": false + "Region": "ap-east-1" } }, { @@ -1751,9 +1760,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "ap-east-1", - "UseDualStack": true + "Region": "ap-east-1" } }, { @@ -1764,9 +1773,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "ap-east-1", - "UseDualStack": false + "Region": "ap-east-1" } }, { @@ -1777,9 +1786,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "cn-north-1", - "UseDualStack": true + "Region": "cn-north-1" } }, { @@ -1790,9 +1799,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "cn-north-1", - "UseDualStack": false + "Region": "cn-north-1" } }, { @@ -1803,9 +1812,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "cn-north-1", - "UseDualStack": true + "Region": "cn-north-1" } }, { @@ -1816,9 +1825,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "cn-north-1", - "UseDualStack": false + "Region": "cn-north-1" } }, { @@ -1829,9 +1838,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "us-gov-west-1", - "UseDualStack": true + "Region": "us-gov-west-1" } }, { @@ -1842,9 +1851,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "us-gov-west-1", - "UseDualStack": false + "Region": "us-gov-west-1" } }, { @@ -1855,9 +1864,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "us-gov-west-1", - "UseDualStack": true + "Region": "us-gov-west-1" } }, { @@ -1868,9 +1877,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "us-gov-west-1", - "UseDualStack": false + "Region": "us-gov-west-1" } }, { @@ -1881,9 +1890,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "ap-southeast-1", - "UseDualStack": true + "Region": "ap-southeast-1" } }, { @@ -1894,9 +1903,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "ap-southeast-1", - "UseDualStack": false + "Region": "ap-southeast-1" } }, { @@ -1907,9 +1916,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "ap-southeast-1", - "UseDualStack": true + "Region": "ap-southeast-1" } }, { @@ -1920,9 +1929,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "ap-southeast-1", - "UseDualStack": false + "Region": "ap-southeast-1" } }, { @@ -1933,9 +1942,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "ap-southeast-2", - "UseDualStack": true + "Region": "ap-southeast-2" } }, { @@ -1946,9 +1955,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": 
true, - "Region": "ap-southeast-2", - "UseDualStack": false + "Region": "ap-southeast-2" } }, { @@ -1959,9 +1968,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "ap-southeast-2", - "UseDualStack": true + "Region": "ap-southeast-2" } }, { @@ -1972,9 +1981,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "ap-southeast-2", - "UseDualStack": false + "Region": "ap-southeast-2" } }, { @@ -1983,9 +1992,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "us-iso-east-1", - "UseDualStack": true + "Region": "us-iso-east-1" } }, { @@ -1996,9 +2005,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "us-iso-east-1", - "UseDualStack": false + "Region": "us-iso-east-1" } }, { @@ -2007,9 +2016,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "us-iso-east-1", - "UseDualStack": true + "Region": "us-iso-east-1" } }, { @@ -2020,9 +2029,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "us-iso-east-1", - "UseDualStack": false + "Region": "us-iso-east-1" } }, { @@ -2033,9 +2042,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "ap-southeast-3", - "UseDualStack": true + "Region": "ap-southeast-3" } }, { @@ -2046,9 +2055,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "ap-southeast-3", - "UseDualStack": false + "Region": "ap-southeast-3" } }, { @@ -2059,9 +2068,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "ap-southeast-3", - "UseDualStack": true + "Region": "ap-southeast-3" } }, { @@ -2072,9 +2081,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "ap-southeast-3", - "UseDualStack": false + "Region": "ap-southeast-3" } }, { @@ -2085,9 +2094,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "ap-southeast-4", - "UseDualStack": true + "Region": "ap-southeast-4" } }, { @@ -2098,9 +2107,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "ap-southeast-4", - "UseDualStack": false + "Region": "ap-southeast-4" } }, { @@ -2111,9 +2120,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "ap-southeast-4", - "UseDualStack": true + "Region": "ap-southeast-4" } }, { @@ -2124,9 +2133,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "ap-southeast-4", - "UseDualStack": false + "Region": "ap-southeast-4" } }, { @@ -2137,9 +2146,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "us-east-1", - "UseDualStack": true + "Region": "us-east-1" } }, { @@ -2150,9 +2159,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "us-east-1", - "UseDualStack": false + "Region": "us-east-1" } }, { @@ -2163,9 +2172,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "us-east-1", - "UseDualStack": true + "Region": "us-east-1" } }, { @@ -2176,9 +2185,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "us-east-1", - "UseDualStack": false + "Region": "us-east-1" } }, { @@ -2189,9 +2198,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "us-east-2", - "UseDualStack": true + "Region": "us-east-2" } }, { @@ -2202,9 +2211,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "us-east-2", - 
"UseDualStack": false + "Region": "us-east-2" } }, { @@ -2215,9 +2224,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "us-east-2", - "UseDualStack": true + "Region": "us-east-2" } }, { @@ -2228,9 +2237,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "us-east-2", - "UseDualStack": false + "Region": "us-east-2" } }, { @@ -2241,9 +2250,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "cn-northwest-1", - "UseDualStack": true + "Region": "cn-northwest-1" } }, { @@ -2254,9 +2263,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "cn-northwest-1", - "UseDualStack": false + "Region": "cn-northwest-1" } }, { @@ -2267,9 +2276,9 @@ } }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "cn-northwest-1", - "UseDualStack": true + "Region": "cn-northwest-1" } }, { @@ -2280,9 +2289,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "cn-northwest-1", - "UseDualStack": false + "Region": "cn-northwest-1" } }, { @@ -2291,9 +2300,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { + "UseDualStack": true, "UseFIPS": true, - "Region": "us-isob-east-1", - "UseDualStack": true + "Region": "us-isob-east-1" } }, { @@ -2304,9 +2313,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": true, - "Region": "us-isob-east-1", - "UseDualStack": false + "Region": "us-isob-east-1" } }, { @@ -2315,9 +2324,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { + "UseDualStack": true, "UseFIPS": false, - "Region": "us-isob-east-1", - "UseDualStack": true + "Region": "us-isob-east-1" } }, { @@ -2328,9 +2337,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, - "Region": "us-isob-east-1", - "UseDualStack": false + "Region": "us-isob-east-1" } }, { @@ -2341,9 +2350,9 @@ } }, "params": { + "UseDualStack": false, "UseFIPS": false, "Region": "us-east-1", - "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -2353,9 +2362,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { + "UseDualStack": false, "UseFIPS": true, "Region": "us-east-1", - "UseDualStack": false, "Endpoint": "https://example.com" } }, @@ -2365,9 +2374,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { + "UseDualStack": true, "UseFIPS": false, "Region": "us-east-1", - "UseDualStack": true, "Endpoint": "https://example.com" } } @@ -2558,7 +2567,7 @@ "managedTerminationProtection": { "target": "com.amazonaws.ecs#ManagedTerminationProtection", "traits": { - "smithy.api#documentation": "The managed termination protection setting to use for the Auto Scaling group capacity\n\t\t\tprovider. This determines whether the Auto Scaling group has managed termination\n\t\t\tprotection. The default is disabled.
\nWhen using managed termination protection, managed scaling must also be used\n\t\t\t\totherwise managed termination protection doesn't work.
\nWhen managed termination protection is enabled, Amazon ECS prevents the Amazon EC2 instances in\n\t\t\tan Auto Scaling group that contain tasks from being terminated during a scale-in action.\n\t\t\tThe Auto Scaling group and each instance in the Auto Scaling group must have instance\n\t\t\tprotection from scale-in actions enabled as well. For more information, see Instance Protection in the Auto Scaling User Guide.
\nWhen managed termination protection is disabled, your Amazon EC2 instances aren't protected\n\t\t\tfrom termination when the Auto Scaling group scales in.
" + "smithy.api#documentation": "The managed termination protection setting to use for the Auto Scaling group capacity\n\t\t\tprovider. This determines whether the Auto Scaling group has managed termination\n\t\t\tprotection. The default is disabled.
\n\t\tWhen using managed termination protection, managed scaling must also be used\n\t\t\t\totherwise managed termination protection doesn't work.
\n\t\tWhen managed termination protection is enabled, Amazon ECS prevents the Amazon EC2 instances in\n\t\t\tan Auto Scaling group that contain tasks from being terminated during a scale-in action.\n\t\t\tThe Auto Scaling group and each instance in the Auto Scaling group must have instance\n\t\t\tprotection from scale-in actions enabled as well. For more information, see Instance Protection in the Auto Scaling User Guide.
\n\t\tWhen managed termination protection is disabled, your Amazon EC2 instances aren't protected\n\t\t\tfrom termination when the Auto Scaling group scales in.
" } } }, @@ -2578,7 +2587,7 @@ "managedTerminationProtection": { "target": "com.amazonaws.ecs#ManagedTerminationProtection", "traits": { - "smithy.api#documentation": "The managed termination protection setting to use for the Auto Scaling group capacity\n\t\t\tprovider. This determines whether the Auto Scaling group has managed termination\n\t\t\tprotection.
\nWhen using managed termination protection, managed scaling must also be used\n\t\t\t\totherwise managed termination protection doesn't work.
\nWhen managed termination protection is enabled, Amazon ECS prevents the Amazon EC2 instances in\n\t\t\tan Auto Scaling group that contain tasks from being terminated during a scale-in action.\n\t\t\tThe Auto Scaling group and each instance in the Auto Scaling group must have instance\n\t\t\tprotection from scale-in actions enabled. For more information, see Instance Protection in the Auto Scaling User Guide.
\nWhen managed termination protection is disabled, your Amazon EC2 instances aren't protected\n\t\t\tfrom termination when the Auto Scaling group scales in.
" + "smithy.api#documentation": "The managed termination protection setting to use for the Auto Scaling group capacity\n\t\t\tprovider. This determines whether the Auto Scaling group has managed termination\n\t\t\tprotection.
\n\t\tWhen using managed termination protection, managed scaling must also be used\n\t\t\t\totherwise managed termination protection doesn't work.
\n\t\tWhen managed termination protection is enabled, Amazon ECS prevents the Amazon EC2 instances in\n\t\t\tan Auto Scaling group that contain tasks from being terminated during a scale-in action.\n\t\t\tThe Auto Scaling group and each instance in the Auto Scaling group must have instance\n\t\t\tprotection from scale-in actions enabled. For more information, see Instance Protection in the Auto Scaling User Guide.
\n\t\tWhen managed termination protection is disabled, your Amazon EC2 instances aren't protected\n\t\t\tfrom termination when the Auto Scaling group scales in.
" } } }, @@ -2592,14 +2601,14 @@ "subnets": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "The IDs of the subnets associated with the task or service. There's a limit of 16\n\t\t\tsubnets that can be specified per AwsVpcConfiguration
.
All specified subnets must be from the same VPC.
\nThe IDs of the subnets associated with the task or service. There's a limit of 16\n\t\t\tsubnets that can be specified per AwsVpcConfiguration
.
All specified subnets must be from the same VPC.
\n\t\tThe IDs of the security groups associated with the task or service. If you don't\n\t\t\tspecify a security group, the default security group for the VPC is used. There's a\n\t\t\tlimit of 5 security groups that can be specified per\n\t\t\tAwsVpcConfiguration
.
All specified security groups must be from the same VPC.
\nThe IDs of the security groups associated with the task or service. If you don't\n\t\t\tspecify a security group, the default security group for the VPC is used. There's a\n\t\t\tlimit of 5 security groups that can be specified per\n\t\t\tAwsVpcConfiguration
.
All specified security groups must be from the same VPC.
\n\t\tThe update status of the capacity provider. The following are the possible states that\n\t\t\tis returned.
\nThe capacity provider is in the process of being deleted.
\nThe capacity provider was successfully deleted and has an\n\t\t\t\t\t\t\tINACTIVE
status.
The capacity provider can't be deleted. The update status reason provides\n\t\t\t\t\t\tfurther details about why the delete failed.
\nThe update status of the capacity provider. The following are the possible states that\n\t\t\tis returned.
\n\t\tThe capacity provider is in the process of being deleted.
\n\t\t\t\tThe capacity provider was successfully deleted and has an\n\t\t\t\t\t\t\tINACTIVE
status.
The capacity provider can't be deleted. The update status reason provides\n\t\t\t\t\t\tfurther details about why the delete failed.
\n\t\t\t\tThe metadata that you apply to the capacity provider to help you categorize and\n\t\t\torganize it. Each tag consists of a key and an optional value. You define both.
\nThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The metadata that you apply to the capacity provider to help you categorize and\n\t\t\torganize it. Each tag consists of a key and an optional value. You define both.
\n\t\tThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The weight value designates the relative percentage of the total\n\t\t\tnumber of tasks launched that should use the specified capacity provider. The\n\t\t\t\tweight
value is taken into consideration after the base
\n\t\t\tvalue, if defined, is satisfied.
If no weight
value is specified, the default value of 0
is\n\t\t\tused. When multiple capacity providers are specified within a capacity provider\n\t\t\tstrategy, at least one of the capacity providers must have a weight value greater than\n\t\t\tzero and any capacity providers with a weight of 0
can't be used to place\n\t\t\ttasks. If you specify multiple capacity providers in a strategy that all have a weight\n\t\t\tof 0
, any RunTask
or CreateService
actions using\n\t\t\tthe capacity provider strategy will fail.
An example scenario for using weights is defining a strategy that contains two\n\t\t\tcapacity providers and both have a weight of 1
, then when the\n\t\t\t\tbase
is satisfied, the tasks will be split evenly across the two\n\t\t\tcapacity providers. Using that same logic, if you specify a weight of 1
for\n\t\t\t\tcapacityProviderA and a weight of 4
for\n\t\t\t\tcapacityProviderB, then for every one task that's run using\n\t\t\t\tcapacityProviderA, four tasks would use\n\t\t\t\tcapacityProviderB.
The weight value designates the relative percentage of the total\n\t\t\tnumber of tasks launched that should use the specified capacity provider. The\n\t\t\t\tweight
value is taken into consideration after the base
\n\t\t\tvalue, if defined, is satisfied.
If no weight
value is specified, the default value of 0
is\n\t\t\tused. When multiple capacity providers are specified within a capacity provider\n\t\t\tstrategy, at least one of the capacity providers must have a weight value greater than\n\t\t\tzero and any capacity providers with a weight of 0
can't be used to place\n\t\t\ttasks. If you specify multiple capacity providers in a strategy that all have a weight\n\t\t\tof 0
, any RunTask
or CreateService
actions using\n\t\t\tthe capacity provider strategy will fail.
An example scenario for using weights is defining a strategy that contains two\n\t\t\tcapacity providers and both have a weight of 1
, then when the\n\t\t\t\tbase
is satisfied, the tasks will be split evenly across the two\n\t\t\tcapacity providers. Using that same logic, if you specify a weight of 1
for\n\t\t\t\tcapacityProviderA and a weight of 4
for\n\t\t\t\tcapacityProviderB, then for every one task that's run using\n\t\t\t\tcapacityProviderA, four tasks would use\n\t\t\t\tcapacityProviderB.
The details of a capacity provider strategy. A capacity provider strategy can be set\n\t\t\twhen using the RunTask or CreateCluster APIs or as\n\t\t\tthe default capacity provider strategy for a cluster with the CreateCluster API.
\nOnly capacity providers that are already associated with a cluster and have an\n\t\t\t\tACTIVE
or UPDATING
status can be used in a capacity\n\t\t\tprovider strategy. The PutClusterCapacityProviders API is used to\n\t\t\tassociate a capacity provider with a cluster.
If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New Auto Scaling group capacity providers can be\n\t\t\tcreated with the CreateCapacityProvider API operation.
\nTo use a Fargate capacity provider, specify either the FARGATE
or\n\t\t\t\tFARGATE_SPOT
capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be used in a\n\t\t\tcapacity provider strategy.
A capacity provider strategy may contain a maximum of 6 capacity providers.
" + "smithy.api#documentation": "The details of a capacity provider strategy. A capacity provider strategy can be set\n\t\t\twhen using the RunTask or CreateCluster APIs or as\n\t\t\tthe default capacity provider strategy for a cluster with the CreateCluster API.
\n\t\tOnly capacity providers that are already associated with a cluster and have an\n\t\t\t\tACTIVE
or UPDATING
status can be used in a capacity\n\t\t\tprovider strategy. The PutClusterCapacityProviders API is used to\n\t\t\tassociate a capacity provider with a cluster.
If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New Auto Scaling group capacity providers can be\n\t\t\tcreated with the CreateCapacityProvider API operation.
\n\t\tTo use a Fargate capacity provider, specify either the FARGATE
or\n\t\t\t\tFARGATE_SPOT
capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be used in a\n\t\t\tcapacity provider strategy.
A capacity provider strategy may contain a maximum of 6 capacity providers.
" } }, "com.amazonaws.ecs#CapacityProviderStrategyItemBase": { @@ -2876,7 +2885,7 @@ "status": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The status of the cluster. The following are the possible states that are\n\t\t\treturned.
\nThe cluster is ready to accept tasks and if applicable you can register\n\t\t\t\t\t\tcontainer instances with the cluster.
\nThe cluster has capacity providers that are associated with it and the\n\t\t\t\t\t\tresources needed for the capacity provider are being created.
\nThe cluster has capacity providers that are associated with it and the\n\t\t\t\t\t\tresources needed for the capacity provider are being deleted.
\nThe cluster has capacity providers that are associated with it and the\n\t\t\t\t\t\tresources needed for the capacity provider have failed to create.
\nThe cluster has been deleted. Clusters with an INACTIVE
\n\t\t\t\t\t\tstatus may remain discoverable in your account for a period of time.\n\t\t\t\t\t\tHowever, this behavior is subject to change in the future. We don't\n\t\t\t\t\t\trecommend that you rely on INACTIVE
clusters persisting.
The status of the cluster. The following are the possible states that are\n\t\t\treturned.
\n\t\tThe cluster is ready to accept tasks and if applicable you can register\n\t\t\t\t\t\tcontainer instances with the cluster.
\n\t\t\t\tThe cluster has capacity providers that are associated with it and the\n\t\t\t\t\t\tresources needed for the capacity provider are being created.
\n\t\t\t\tThe cluster has capacity providers that are associated with it and the\n\t\t\t\t\t\tresources needed for the capacity provider are being deleted.
\n\t\t\t\tThe cluster has capacity providers that are associated with it and the\n\t\t\t\t\t\tresources needed for the capacity provider have failed to create.
\n\t\t\t\tThe cluster has been deleted. Clusters with an INACTIVE
\n\t\t\t\t\t\tstatus may remain discoverable in your account for a period of time.\n\t\t\t\t\t\tHowever, this behavior is subject to change in the future. We don't\n\t\t\t\t\t\trecommend that you rely on INACTIVE
clusters persisting.
Additional information about your clusters that are separated by launch type. They\n\t\t\tinclude the following:
\nrunningEC2TasksCount
\nRunningFargateTasksCount
\npendingEC2TasksCount
\npendingFargateTasksCount
\nactiveEC2ServiceCount
\nactiveFargateServiceCount
\ndrainingEC2ServiceCount
\ndrainingFargateServiceCount
\nAdditional information about your clusters that are separated by launch type. They\n\t\t\tinclude the following:
\n\t\trunningEC2TasksCount
\n\t\t\tRunningFargateTasksCount
\n\t\t\tpendingEC2TasksCount
\n\t\t\tpendingFargateTasksCount
\n\t\t\tactiveEC2ServiceCount
\n\t\t\tactiveFargateServiceCount
\n\t\t\tdrainingEC2ServiceCount
\n\t\t\tdrainingFargateServiceCount
\n\t\t\tThe metadata that you apply to the cluster to help you categorize and organize them.\n\t\t\tEach tag consists of a key and an optional value. You define both.
\nThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The metadata that you apply to the cluster to help you categorize and organize them.\n\t\t\tEach tag consists of a key and an optional value. You define both.
\n\t\tThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The status of the capacity providers associated with the cluster. The following are\n\t\t\tthe states that are returned.
\nThe available capacity providers for the cluster are updating.
\nThe capacity providers have successfully updated.
\nThe capacity provider updates failed.
\nThe status of the capacity providers associated with the cluster. The following are\n\t\t\tthe states that are returned.
\n\t\tThe available capacity providers for the cluster are updating.
\n\t\t\t\tThe capacity providers have successfully updated.
\n\t\t\t\tThe capacity provider updates failed.
\n\t\t\t\tUse this parameter to set a default Service Connect namespace. After you set a default \n\tService Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as\n\tclient services in the namespace. This setting only applies to new services that set the enabled
parameter to\n\ttrue
in the ServiceConnectConfiguration
.\n\tYou can set the namespace of each service individually in the ServiceConnectConfiguration
to override this default\n\tparameter.
Tasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "Use this parameter to set a default Service Connect namespace. After you set a default \n\tService Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as\n\tclient services in the namespace. This setting only applies to new services that set the enabled
parameter to\n\ttrue
in the ServiceConnectConfiguration
.\n\tYou can set the namespace of each service individually in the ServiceConnectConfiguration
to override this default\n\tparameter.
Tasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" } } }, @@ -3074,7 +3083,7 @@ } }, "traits": { - "smithy.api#documentation": "Use this parameter to set a default Service Connect namespace. After you set a default \n\tService Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as\n\tclient services in the namespace. This setting only applies to new services that set the enabled
parameter to\n\ttrue
in the ServiceConnectConfiguration
.\n\tYou can set the namespace of each service individually in the ServiceConnectConfiguration
to override this default\n\tparameter.
Tasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "Use this parameter to set a default Service Connect namespace. After you set a default \n\tService Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as\n\tclient services in the namespace. This setting only applies to new services that set the enabled
parameter to\n\ttrue
in the ServiceConnectConfiguration
.\n\tYou can set the namespace of each service individually in the ServiceConnectConfiguration
to override this default\n\tparameter.
Tasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#ClusterServiceConnectDefaultsRequest": { @@ -3083,13 +3092,13 @@ "namespace": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace that's used when you create a service and don't specify\n\t\t\ta Service Connect configuration. The namespace name can include up to 1024 characters.\n\t\t\tThe name is case-sensitive. The name can't include hyphens (-), tilde (~), greater than\n\t\t\t(>), less than (<), or slash (/).
\nIf you enter an existing namespace name or ARN, then that namespace will be used.\n\t\t\tAny namespace type is supported. The namespace must be in this account and this Amazon Web Services\n\t\t\tRegion.
\nIf you enter a new name, a Cloud Map namespace will be created. Amazon ECS creates a\n\t\t\tCloud Map namespace with the \"API calls\" method of instance discovery only. This instance\n\t\t\tdiscovery method is the \"HTTP\" namespace type in the Command Line Interface. Other types of instance\n\t\t\tdiscovery aren't used by Service Connect.
\nIf you update the service with an empty string \"\"
for the namespace name,\n\t\t\tthe cluster configuration for Service Connect is removed. Note that the namespace will\n\t\t\tremain in Cloud Map and must be deleted separately.
For more information about Cloud Map, see Working\n\t\t\t\twith Services in the Cloud Map Developer Guide.
", + "smithy.api#documentation": "The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace that's used when you create a service and don't specify\n\t\t\ta Service Connect configuration. The namespace name can include up to 1024 characters.\n\t\t\tThe name is case-sensitive. The name can't include hyphens (-), tilde (~), greater than\n\t\t\t(>), less than (<), or slash (/).
\n\t\tIf you enter an existing namespace name or ARN, then that namespace will be used.\n\t\t\tAny namespace type is supported. The namespace must be in this account and this Amazon Web Services\n\t\t\tRegion.
\n\t\tIf you enter a new name, a Cloud Map namespace will be created. Amazon ECS creates a\n\t\t\tCloud Map namespace with the \"API calls\" method of instance discovery only. This instance\n\t\t\tdiscovery method is the \"HTTP\" namespace type in the Command Line Interface. Other types of instance\n\t\t\tdiscovery aren't used by Service Connect.
\n\t\tIf you update the service with an empty string \"\"
for the namespace name,\n\t\t\tthe cluster configuration for Service Connect is removed. Note that the namespace will\n\t\t\tremain in Cloud Map and must be deleted separately.
For more information about Cloud Map, see Working\n\t\t\t\twith Services in the Cloud Map Developer Guide.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "Use this parameter to set a default Service Connect namespace. After you set a default \n\tService Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as\n\tclient services in the namespace. This setting only applies to new services that set the enabled
parameter to\n\ttrue
in the ServiceConnectConfiguration
.\n\tYou can set the namespace of each service individually in the ServiceConnectConfiguration
to override this default\n\tparameter.
Tasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "Use this parameter to set a default Service Connect namespace. After you set a default \n\tService Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as\n\tclient services in the namespace. This setting only applies to new services that set the enabled
parameter to\n\ttrue
in the ServiceConnectConfiguration
.\n\tYou can set the namespace of each service individually in the ServiceConnectConfiguration
to override this default\n\tparameter.
Tasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#ClusterSetting": { @@ -3211,7 +3220,7 @@ "imageDigest": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The container image manifest digest.
\nThe imageDigest
is only returned if the container is using an image\n\t\t\t\thosted in Amazon ECR, otherwise it is omitted.
The container image manifest digest.
\n\t\tThe imageDigest
is only returned if the container is using an image\n\t\t\t\thosted in Amazon ECR, otherwise it is omitted.
The image used to start a container. This string is passed directly to the Docker\n\t\t\tdaemon. By default, images in the Docker Hub registry are available. Other repositories\n\t\t\tare specified with either \n repository-url/image:tag\n
or \n repository-url/image@digest\n
. Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\tIMAGE
parameter of docker\n\t\t\t\trun.
When a new task starts, the Amazon ECS container agent pulls the latest version of\n\t\t\t\t\tthe specified image and tag for the container to use. However, subsequent\n\t\t\t\t\tupdates to a repository image aren't propagated to already running tasks.
\nImages in Amazon ECR repositories can be specified by either using the full\n\t\t\t\t\t\tregistry/repository:tag
or\n\t\t\t\t\t\tregistry/repository@digest
. For example,\n\t\t\t\t\t\t012345678910.dkr.ecr.
\n\t\t\t\t\tor\n\t\t\t\t\t\t012345678910.dkr.ecr.
.\n\t\t\t\t
Images in official repositories on Docker Hub use a single name (for example,\n\t\t\t\t\t\tubuntu
or mongo
).
Images in other repositories on Docker Hub are qualified with an organization\n\t\t\t\t\tname (for example, amazon/amazon-ecs-agent
).
Images in other online repositories are qualified further by a domain name\n\t\t\t\t\t(for example, quay.io/assemblyline/ubuntu
).
The image used to start a container. This string is passed directly to the Docker\n\t\t\tdaemon. By default, images in the Docker Hub registry are available. Other repositories\n\t\t\tare specified with either \n\t\t\t\t repository-url/image:tag\n\t\t\t
or \n\t\t\t\t repository-url/image@digest\n\t\t\t
. Up to 255 letters (uppercase and lowercase), numbers, hyphens, underscores, colons, periods, forward slashes, and number signs are allowed. This parameter maps to Image
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\tIMAGE
parameter of docker\n\t\t\t\trun.
When a new task starts, the Amazon ECS container agent pulls the latest version of\n\t\t\t\t\tthe specified image and tag for the container to use. However, subsequent\n\t\t\t\t\tupdates to a repository image aren't propagated to already running tasks.
\n\t\t\tImages in Amazon ECR repositories can be specified by either using the full\n\t\t\t\t\t\tregistry/repository:tag
or\n\t\t\t\t\t\tregistry/repository@digest
. For example,\n\t\t\t\t\t\t012345678910.dkr.ecr.
\n\t\t\t\t\tor\n\t\t\t\t\t\t012345678910.dkr.ecr.
.\n\t\t\t\t
Images in official repositories on Docker Hub use a single name (for example,\n\t\t\t\t\t\tubuntu
or mongo
).
Images in other repositories on Docker Hub are qualified with an organization\n\t\t\t\t\tname (for example, amazon/amazon-ecs-agent
).
Images in other online repositories are qualified further by a domain name\n\t\t\t\t\t(for example, quay.io/assemblyline/ubuntu
).
The number of cpu
units reserved for the container. This parameter maps\n\t\t\tto CpuShares
in the Create a container section of the\n\t\t\tDocker Remote API and the --cpu-shares
option to docker run.
This field is optional for tasks using the Fargate launch type, and the\n\t\t\tonly requirement is that the total amount of CPU reserved for all containers within a\n\t\t\ttask be lower than the task-level cpu
value.
You can determine the number of CPU units that are available per EC2 instance type\n\t\t\t\tby multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page\n\t\t\t\tby 1,024.
\nLinux containers share unallocated CPU units with other containers on the container\n\t\t\tinstance with the same ratio as their allocated amount. For example, if you run a\n\t\t\tsingle-container task on a single-core instance type with 512 CPU units specified for\n\t\t\tthat container, and that's the only task running on the container instance, that\n\t\t\tcontainer could use the full 1,024 CPU unit share at any given time. However, if you\n\t\t\tlaunched another copy of the same task on that container instance, each task is\n\t\t\tguaranteed a minimum of 512 CPU units when needed. Moreover, each container could float\n\t\t\tto higher CPU usage if the other container was not using it. If both tasks were 100%\n\t\t\tactive all of the time, they would be limited to 512 CPU units.
\nOn Linux container instances, the Docker daemon on the container instance uses the CPU\n\t\t\tvalue to calculate the relative CPU share ratios for running containers. For more\n\t\t\tinformation, see CPU share\n\t\t\t\tconstraint in the Docker documentation. The minimum valid CPU share value\n\t\t\tthat the Linux kernel allows is 2. However, the CPU parameter isn't required, and you\n\t\t\tcan use CPU values below 2 in your container definitions. For CPU values below 2\n\t\t\t(including null), the behavior varies based on your Amazon ECS container agent\n\t\t\tversion:
\n\n Agent versions less than or equal to 1.1.0:\n\t\t\t\t\tNull and zero CPU values are passed to Docker as 0, which Docker then converts\n\t\t\t\t\tto 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux\n\t\t\t\t\tkernel converts to two CPU shares.
\n\n Agent versions greater than or equal to 1.2.0:\n\t\t\t\t\tNull, zero, and CPU values of 1 are passed to Docker as 2.
\nOn Windows container instances, the CPU limit is enforced as an absolute limit, or a\n\t\t\tquota. Windows containers only have access to the specified amount of CPU that's\n\t\t\tdescribed in the task definition. A null or zero CPU value is passed to Docker as\n\t\t\t\t0
, which Windows interprets as 1% of one CPU.
The number of cpu
units reserved for the container. This parameter maps\n\t\t\tto CpuShares
in the Create a container section of the\n\t\t\tDocker Remote API and the --cpu-shares
option to docker run.
This field is optional for tasks using the Fargate launch type, and the\n\t\t\tonly requirement is that the total amount of CPU reserved for all containers within a\n\t\t\ttask be lower than the task-level cpu
value.
You can determine the number of CPU units that are available per EC2 instance type\n\t\t\t\tby multiplying the vCPUs listed for that instance type on the Amazon EC2 Instances detail page\n\t\t\t\tby 1,024.
\n\t\tLinux containers share unallocated CPU units with other containers on the container\n\t\t\tinstance with the same ratio as their allocated amount. For example, if you run a\n\t\t\tsingle-container task on a single-core instance type with 512 CPU units specified for\n\t\t\tthat container, and that's the only task running on the container instance, that\n\t\t\tcontainer could use the full 1,024 CPU unit share at any given time. However, if you\n\t\t\tlaunched another copy of the same task on that container instance, each task is\n\t\t\tguaranteed a minimum of 512 CPU units when needed. Moreover, each container could float\n\t\t\tto higher CPU usage if the other container was not using it. If both tasks were 100%\n\t\t\tactive all of the time, they would be limited to 512 CPU units.
\n\t\tOn Linux container instances, the Docker daemon on the container instance uses the CPU\n\t\t\tvalue to calculate the relative CPU share ratios for running containers. For more\n\t\t\tinformation, see CPU share\n\t\t\t\tconstraint in the Docker documentation. The minimum valid CPU share value\n\t\t\tthat the Linux kernel allows is 2. However, the CPU parameter isn't required, and you\n\t\t\tcan use CPU values below 2 in your container definitions. For CPU values below 2\n\t\t\t(including null), the behavior varies based on your Amazon ECS container agent\n\t\t\tversion:
\n\t\t\n\t\t\t\t\t Agent versions less than or equal to 1.1.0:\n\t\t\t\t\tNull and zero CPU values are passed to Docker as 0, which Docker then converts\n\t\t\t\t\tto 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux\n\t\t\t\t\tkernel converts to two CPU shares.
\n\t\t\t\n\t\t\t\t\t Agent versions greater than or equal to 1.2.0:\n\t\t\t\t\tNull, zero, and CPU values of 1 are passed to Docker as 2.
\n\t\t\tOn Windows container instances, the CPU limit is enforced as an absolute limit, or a\n\t\t\tquota. Windows containers only have access to the specified amount of CPU that's\n\t\t\tdescribed in the task definition. A null or zero CPU value is passed to Docker as\n\t\t\t\t0
, which Windows interprets as 1% of one CPU.
The amount (in MiB) of memory to present to the container. If your container attempts\n\t\t\tto exceed the memory specified here, the container is killed. The total amount of memory\n\t\t\treserved for all containers within a task must be lower than the task\n\t\t\t\tmemory
value, if one is specified. This parameter maps to\n\t\t\t\tMemory
in the Create a container section of the\n\t\t\tDocker Remote API and the --memory
option to docker run.
If using the Fargate launch type, this parameter is optional.
\nIf using the EC2 launch type, you must specify either a task-level\n\t\t\tmemory value or a container-level memory value. If you specify both a container-level\n\t\t\t\tmemory
and memoryReservation
value, memory
\n\t\t\tmust be greater than memoryReservation
. If you specify\n\t\t\t\tmemoryReservation
, then that value is subtracted from the available\n\t\t\tmemory resources for the container instance where the container is placed. Otherwise,\n\t\t\tthe value of memory
is used.
The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 6 MiB of memory for your containers.
\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 4 MiB of memory for your containers.
" + "smithy.api#documentation": "The amount (in MiB) of memory to present to the container. If your container attempts\n\t\t\tto exceed the memory specified here, the container is killed. The total amount of memory\n\t\t\treserved for all containers within a task must be lower than the task\n\t\t\t\tmemory
value, if one is specified. This parameter maps to\n\t\t\t\tMemory
in the Create a container section of the\n\t\t\tDocker Remote API and the --memory
option to docker run.
If using the Fargate launch type, this parameter is optional.
\n\t\tIf using the EC2 launch type, you must specify either a task-level\n\t\t\tmemory value or a container-level memory value. If you specify both a container-level\n\t\t\t\tmemory
and memoryReservation
value, memory
\n\t\t\tmust be greater than memoryReservation
. If you specify\n\t\t\t\tmemoryReservation
, then that value is subtracted from the available\n\t\t\tmemory resources for the container instance where the container is placed. Otherwise,\n\t\t\tthe value of memory
is used.
The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 6 MiB of memory for your containers.
\n\t\tThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 4 MiB of memory for your containers.
" } }, "memoryReservation": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "The soft limit (in MiB) of memory to reserve for the container. When system memory is\n\t\t\tunder heavy contention, Docker attempts to keep the container memory to this soft limit.\n\t\t\tHowever, your container can consume more memory when it needs to, up to either the hard\n\t\t\tlimit specified with the memory
parameter (if applicable), or all of the\n\t\t\tavailable memory on the container instance, whichever comes first. This parameter maps\n\t\t\tto MemoryReservation
in the Create a container section of\n\t\t\tthe Docker Remote API and the --memory-reservation
option to docker run.
If a task-level memory value is not specified, you must specify a non-zero integer for\n\t\t\tone or both of memory
or memoryReservation
in a container\n\t\t\tdefinition. If you specify both, memory
must be greater than\n\t\t\t\tmemoryReservation
. If you specify memoryReservation
, then\n\t\t\tthat value is subtracted from the available memory resources for the container instance\n\t\t\twhere the container is placed. Otherwise, the value of memory
is\n\t\t\tused.
For example, if your container normally uses 128 MiB of memory, but occasionally\n\t\t\tbursts to 256 MiB of memory for short periods of time, you can set a\n\t\t\t\tmemoryReservation
of 128 MiB, and a memory
hard limit of\n\t\t\t300 MiB. This configuration would allow the container to only reserve 128 MiB of memory\n\t\t\tfrom the remaining resources on the container instance, but also allow the container to\n\t\t\tconsume more memory resources when needed.
The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 6 MiB of memory for your containers.
\nThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 4 MiB of memory for your containers.
" + "smithy.api#documentation": "The soft limit (in MiB) of memory to reserve for the container. When system memory is\n\t\t\tunder heavy contention, Docker attempts to keep the container memory to this soft limit.\n\t\t\tHowever, your container can consume more memory when it needs to, up to either the hard\n\t\t\tlimit specified with the memory
parameter (if applicable), or all of the\n\t\t\tavailable memory on the container instance, whichever comes first. This parameter maps\n\t\t\tto MemoryReservation
in the Create a container section of\n\t\t\tthe Docker Remote API and the --memory-reservation
option to docker run.
If a task-level memory value is not specified, you must specify a non-zero integer for\n\t\t\tone or both of memory
or memoryReservation
in a container\n\t\t\tdefinition. If you specify both, memory
must be greater than\n\t\t\t\tmemoryReservation
. If you specify memoryReservation
, then\n\t\t\tthat value is subtracted from the available memory resources for the container instance\n\t\t\twhere the container is placed. Otherwise, the value of memory
is\n\t\t\tused.
For example, if your container normally uses 128 MiB of memory, but occasionally\n\t\t\tbursts to 256 MiB of memory for short periods of time, you can set a\n\t\t\t\tmemoryReservation
of 128 MiB, and a memory
hard limit of\n\t\t\t300 MiB. This configuration would allow the container to only reserve 128 MiB of memory\n\t\t\tfrom the remaining resources on the container instance, but also allow the container to\n\t\t\tconsume more memory resources when needed.
The Docker 20.10.0 or later daemon reserves a minimum of 6 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 6 MiB of memory for your containers.
\n\t\tThe Docker 19.03.13-ce or earlier daemon reserves a minimum of 4 MiB of memory for a\n\t\t\tcontainer. So, don't specify less than 4 MiB of memory for your containers.
" } }, "links": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "The links
parameter allows containers to communicate with each other\n\t\t\twithout the need for port mappings. This parameter is only supported if the network mode\n\t\t\tof a task definition is bridge
. The name:internalName
\n\t\t\tconstruct is analogous to name:alias
in Docker links.\n\t\t\tUp to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. For more information about linking Docker containers, go to\n\t\t\t\tLegacy container links\n\t\t\tin the Docker documentation. This parameter maps to Links
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--link
option to docker\n\t\t\trun.
This parameter is not supported for Windows containers.
\nContainers that are collocated on a single container instance may be able to\n\t\t\t\tcommunicate with each other without requiring links or host port mappings. Network\n\t\t\t\tisolation is achieved on the container instance using security groups and VPC\n\t\t\t\tsettings.
\nThe links
parameter allows containers to communicate with each other\n\t\t\twithout the need for port mappings. This parameter is only supported if the network mode\n\t\t\tof a task definition is bridge
. The name:internalName
\n\t\t\tconstruct is analogous to name:alias
in Docker links.\n\t\t\tUp to 255 letters (uppercase and lowercase), numbers, underscores, and hyphens are allowed. For more information about linking Docker containers, go to\n\t\t\t\tLegacy container links\n\t\t\tin the Docker documentation. This parameter maps to Links
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--link
option to docker\n\t\t\trun.
This parameter is not supported for Windows containers.
\nContainers that are collocated on a single container instance may be able to\n\t\t\t\tcommunicate with each other without requiring links or host port mappings. Network\n\t\t\t\tisolation is achieved on the container instance using security groups and VPC\n\t\t\t\tsettings.
\n\t\tThe list of port mappings for the container. Port mappings allow containers to access\n\t\t\tports on the host container instance to send or receive traffic.
\nFor task definitions that use the awsvpc
network mode, only specify the\n\t\t\t\tcontainerPort
. The hostPort
can be left blank or it must\n\t\t\tbe the same value as the containerPort
.
Port mappings on Windows use the NetNAT
gateway address rather than\n\t\t\t\tlocalhost
. There's no loopback for port mappings on Windows, so you\n\t\t\tcan't access a container's mapped port from the host itself.
This parameter maps to PortBindings
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--publish
option to docker\n\t\t\t\trun. If the network mode of a task definition is set to none
,\n\t\t\tthen you can't specify port mappings. If the network mode of a task definition is set to\n\t\t\t\thost
, then host ports must either be undefined or they must match the\n\t\t\tcontainer port in the port mapping.
After a task reaches the RUNNING
status, manual and automatic host\n\t\t\t\tand container port assignments are visible in the Network\n\t\t\t\t\tBindings section of a container description for a selected task in\n\t\t\t\tthe Amazon ECS console. The assignments are also visible in the\n\t\t\t\t\tnetworkBindings
section DescribeTasks\n\t\t\t\tresponses.
The list of port mappings for the container. Port mappings allow containers to access\n\t\t\tports on the host container instance to send or receive traffic.
\n\t\tFor task definitions that use the awsvpc
network mode, only specify the\n\t\t\t\tcontainerPort
. The hostPort
can be left blank or it must\n\t\t\tbe the same value as the containerPort
.
Port mappings on Windows use the NetNAT
gateway address rather than\n\t\t\t\tlocalhost
. There's no loopback for port mappings on Windows, so you\n\t\t\tcan't access a container's mapped port from the host itself.
This parameter maps to PortBindings
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--publish
option to docker\n\t\t\t\trun. If the network mode of a task definition is set to none
,\n\t\t\tthen you can't specify port mappings. If the network mode of a task definition is set to\n\t\t\t\thost
, then host ports must either be undefined or they must match the\n\t\t\tcontainer port in the port mapping.
After a task reaches the RUNNING
status, manual and automatic host\n\t\t\t\tand container port assignments are visible in the Network\n\t\t\t\t\tBindings section of a container description for a selected task in\n\t\t\t\tthe Amazon ECS console. The assignments are also visible in the\n\t\t\t\t\tnetworkBindings
section DescribeTasks\n\t\t\t\tresponses.
If the essential
parameter of a container is marked as true
,\n\t\t\tand that container fails or stops for any reason, all other containers that are part of\n\t\t\tthe task are stopped. If the essential
parameter of a container is marked\n\t\t\tas false
, its failure doesn't affect the rest of the containers in a task.\n\t\t\tIf this parameter is omitted, a container is assumed to be essential.
All tasks must have at least one essential container. If you have an application\n\t\t\tthat's composed of multiple containers, group containers that are used for a common\n\t\t\tpurpose into components, and separate the different components into multiple task\n\t\t\tdefinitions. For more information, see Application\n\t\t\t\tArchitecture in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "If the essential
parameter of a container is marked as true
,\n\t\t\tand that container fails or stops for any reason, all other containers that are part of\n\t\t\tthe task are stopped. If the essential
parameter of a container is marked\n\t\t\tas false
, its failure doesn't affect the rest of the containers in a task.\n\t\t\tIf this parameter is omitted, a container is assumed to be essential.
All tasks must have at least one essential container. If you have an application\n\t\t\tthat's composed of multiple containers, group containers that are used for a common\n\t\t\tpurpose into components, and separate the different components into multiple task\n\t\t\tdefinitions. For more information, see Application\n\t\t\t\tArchitecture in the Amazon Elastic Container Service Developer Guide.
" } }, "entryPoint": { "target": "com.amazonaws.ecs#StringList", "traits": { - "smithy.api#documentation": "Early versions of the Amazon ECS container agent don't properly handle\n\t\t\t\t\tentryPoint
parameters. If you have problems using\n\t\t\t\t\tentryPoint
, update your container agent or enter your commands and\n\t\t\t\targuments as command
array items instead.
The entry point that's passed to the container. This parameter maps to\n\t\t\t\tEntrypoint
in the Create a container section of the\n\t\t\tDocker Remote API and the --entrypoint
option to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#entrypoint.
Early versions of the Amazon ECS container agent don't properly handle\n\t\t\t\t\tentryPoint
parameters. If you have problems using\n\t\t\t\t\tentryPoint
, update your container agent or enter your commands and\n\t\t\t\targuments as command
array items instead.
The entry point that's passed to the container. This parameter maps to\n\t\t\t\tEntrypoint
in the Create a container section of the\n\t\t\tDocker Remote API and the --entrypoint
option to docker run. For more information, see https://docs.docker.com/engine/reference/builder/#entrypoint.
The environment variables to pass to a container. This parameter maps to\n\t\t\t\tEnv
in the Create a container section of the\n\t\t\tDocker Remote API and the --env
option to docker run.
We don't recommend that you use plaintext environment variables for sensitive\n\t\t\t\tinformation, such as credential data.
\nThe environment variables to pass to a container. This parameter maps to\n\t\t\t\tEnv
in the Create a container section of the\n\t\t\tDocker Remote API and the --env
option to docker run.
We don't recommend that you use plaintext environment variables for sensitive\n\t\t\t\tinformation, such as credential data.
\n\t\tA list of files containing the environment variables to pass to a container. This\n\t\t\tparameter maps to the --env-file
option to docker run.
You can specify up to ten environment files. The file must have a .env
\n\t\t\tfile extension. Each line in an environment file contains an environment variable in\n\t\t\t\tVARIABLE=VALUE
format. Lines beginning with #
are treated\n\t\t\tas comments and are ignored. For more information about the environment variable file\n\t\t\tsyntax, see Declare default\n\t\t\t\tenvironment variables in file.
If there are environment variables specified using the environment
\n\t\t\tparameter in a container definition, they take precedence over the variables contained\n\t\t\twithin an environment file. If multiple environment files are specified that contain the\n\t\t\tsame variable, they're processed from the top down. We recommend that you use unique\n\t\t\tvariable names. For more information, see Specifying Environment\n\t\t\t\tVariables in the Amazon Elastic Container Service Developer Guide.
A list of files containing the environment variables to pass to a container. This\n\t\t\tparameter maps to the --env-file
option to docker run.
You can specify up to ten environment files. The file must have a .env
\n\t\t\tfile extension. Each line in an environment file contains an environment variable in\n\t\t\t\tVARIABLE=VALUE
format. Lines beginning with #
are treated\n\t\t\tas comments and are ignored. For more information about the environment variable file\n\t\t\tsyntax, see Declare default\n\t\t\t\tenvironment variables in file.
If there are environment variables specified using the environment
\n\t\t\tparameter in a container definition, they take precedence over the variables contained\n\t\t\twithin an environment file. If multiple environment files are specified that contain the\n\t\t\tsame variable, they're processed from the top down. We recommend that you use unique\n\t\t\tvariable names. For more information, see Specifying Environment\n\t\t\t\tVariables in the Amazon Elastic Container Service Developer Guide.
The mount points for data volumes in your container.
\nThis parameter maps to Volumes
in the Create a container\n\t\t\tsection of the Docker Remote API and the --volume
option to docker run.
Windows containers can mount whole directories on the same drive as\n\t\t\t\t$env:ProgramData
. Windows containers can't mount directories on a\n\t\t\tdifferent drive, and mount point can't be across drives.
The mount points for data volumes in your container.
\n\t\tThis parameter maps to Volumes
in the Create a container\n\t\t\tsection of the Docker Remote API and the --volume
option to docker run.
Windows containers can mount whole directories on the same drive as\n\t\t\t\t$env:ProgramData
. Windows containers can't mount directories on a\n\t\t\tdifferent drive, and mount point can't be across drives.
Linux-specific modifications that are applied to the container, such as Linux kernel\n\t\t\tcapabilities. For more information see KernelCapabilities.
\nThis parameter is not supported for Windows containers.
\nLinux-specific modifications that are applied to the container, such as Linux kernel\n\t\t\tcapabilities. For more information see KernelCapabilities.
\n\t\tThis parameter is not supported for Windows containers.
\n\t\tThe dependencies defined for container startup and shutdown. A container can contain\n\t\t\tmultiple dependencies on other containers in a task definition. When a dependency is\n\t\t\tdefined for container startup, for container shutdown it is reversed.
\nFor tasks using the EC2 launch type, the container instances require at\n\t\t\tleast version 1.26.0 of the container agent to turn on container dependencies. However,\n\t\t\twe recommend using the latest container agent version. For information about checking\n\t\t\tyour agent version and updating to the latest version, see Updating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI,\n\t\t\tyour instance needs at least version 1.26.0-1 of the ecs-init
package. If\n\t\t\tyour container instances are launched from version 20190301
or later, then\n\t\t\tthey contain the required versions of the container agent and ecs-init
. For\n\t\t\tmore information, see Amazon ECS-optimized Linux AMI\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
For tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:
\nLinux platform version 1.3.0
or later.
Windows platform version 1.0.0
or later.
The dependencies defined for container startup and shutdown. A container can contain\n\t\t\tmultiple dependencies on other containers in a task definition. When a dependency is\n\t\t\tdefined for container startup, for container shutdown it is reversed.
\n\t\tFor tasks using the EC2 launch type, the container instances require at\n\t\t\tleast version 1.26.0 of the container agent to turn on container dependencies. However,\n\t\t\twe recommend using the latest container agent version. For information about checking\n\t\t\tyour agent version and updating to the latest version, see Updating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI,\n\t\t\tyour instance needs at least version 1.26.0-1 of the ecs-init
package. If\n\t\t\tyour container instances are launched from version 20190301
or later, then\n\t\t\tthey contain the required versions of the container agent and ecs-init
. For\n\t\t\tmore information, see Amazon ECS-optimized Linux AMI\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
For tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:
\n\t\tLinux platform version 1.3.0
or later.
Windows platform version 1.0.0
or later.
Time duration (in seconds) to wait before giving up on resolving dependencies for a\n\t\t\tcontainer. For example, you specify two containers in a task definition with containerA\n\t\t\thaving a dependency on containerB reaching a COMPLETE
,\n\t\t\tSUCCESS
, or HEALTHY
status. If a startTimeout
\n\t\t\tvalue is specified for containerB and it doesn't reach the desired status within that\n\t\t\ttime then containerA gives up and not start. This results in the task transitioning to a\n\t\t\t\tSTOPPED
state.
When the ECS_CONTAINER_START_TIMEOUT
container agent configuration\n\t\t\t\tvariable is used, it's enforced independently from this start timeout value.
For tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:
\nLinux platform version 1.3.0
or later.
Windows platform version 1.0.0
or later.
For tasks using the EC2 launch type, your container instances require at\n\t\t\tleast version 1.26.0
of the container agent to use a container start\n\t\t\ttimeout value. However, we recommend using the latest container agent version. For\n\t\t\tinformation about checking your agent version and updating to the latest version, see\n\t\t\t\tUpdating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI,\n\t\t\tyour instance needs at least version 1.26.0-1
of the ecs-init
\n\t\t\tpackage. If your container instances are launched from version 20190301
or\n\t\t\tlater, then they contain the required versions of the container agent and\n\t\t\t\tecs-init
. For more information, see Amazon ECS-optimized Linux AMI\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
Time duration (in seconds) to wait before giving up on resolving dependencies for a\n\t\t\tcontainer. For example, you specify two containers in a task definition with containerA\n\t\t\thaving a dependency on containerB reaching a COMPLETE
,\n\t\t\tSUCCESS
, or HEALTHY
status. If a startTimeout
\n\t\t\tvalue is specified for containerB and it doesn't reach the desired status within that\n\t\t\ttime, then containerA gives up and doesn't start. This results in the task transitioning to a\n\t\t\t\tSTOPPED
state.
When the ECS_CONTAINER_START_TIMEOUT
container agent configuration\n\t\t\t\tvariable is used, it's enforced independently from this start timeout value.
For tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:
\n\t\tLinux platform version 1.3.0
or later.
Windows platform version 1.0.0
or later.
For tasks using the EC2 launch type, your container instances require at\n\t\t\tleast version 1.26.0
of the container agent to use a container start\n\t\t\ttimeout value. However, we recommend using the latest container agent version. For\n\t\t\tinformation about checking your agent version and updating to the latest version, see\n\t\t\t\tUpdating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI,\n\t\t\tyour instance needs at least version 1.26.0-1
of the ecs-init
\n\t\t\tpackage. If your container instances are launched from version 20190301
or\n\t\t\tlater, then they contain the required versions of the container agent and\n\t\t\t\tecs-init
. For more information, see Amazon ECS-optimized Linux AMI\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
Time duration (in seconds) to wait before the container is forcefully killed if it\n\t\t\tdoesn't exit normally on its own.
\nFor tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:
\nLinux platform version 1.3.0
or later.
Windows platform version 1.0.0
or later.
The max stop timeout value is 120 seconds and if the parameter is not specified, the\n\t\t\tdefault value of 30 seconds is used.
\nFor tasks that use the EC2 launch type, if the stopTimeout
\n\t\t\tparameter isn't specified, the value set for the Amazon ECS container agent configuration\n\t\t\tvariable ECS_CONTAINER_STOP_TIMEOUT
is used. If neither the\n\t\t\t\tstopTimeout
parameter or the ECS_CONTAINER_STOP_TIMEOUT
\n\t\t\tagent configuration variable are set, then the default values of 30 seconds for Linux\n\t\t\tcontainers and 30 seconds on Windows containers are used. Your container instances\n\t\t\trequire at least version 1.26.0 of the container agent to use a container stop timeout\n\t\t\tvalue. However, we recommend using the latest container agent version. For information\n\t\t\tabout checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using\n\t\t\tan Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the\n\t\t\t\tecs-init
package. If your container instances are launched from version\n\t\t\t\t20190301
or later, then they contain the required versions of the\n\t\t\tcontainer agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
Time duration (in seconds) to wait before the container is forcefully killed if it\n\t\t\tdoesn't exit normally on its own.
\n\t\tFor tasks using the Fargate launch type, the task or service requires\n\t\t\tthe following platforms:
\n\t\tLinux platform version 1.3.0
or later.
Windows platform version 1.0.0
or later.
The max stop timeout value is 120 seconds and if the parameter is not specified, the\n\t\t\tdefault value of 30 seconds is used.
\n\t\tFor tasks that use the EC2 launch type, if the stopTimeout
\n\t\t\tparameter isn't specified, the value set for the Amazon ECS container agent configuration\n\t\t\tvariable ECS_CONTAINER_STOP_TIMEOUT
is used. If neither the\n\t\t\t\tstopTimeout
 parameter nor the ECS_CONTAINER_STOP_TIMEOUT
\n\t\t\tagent configuration variable are set, then the default values of 30 seconds for Linux\n\t\t\tcontainers and 30 seconds on Windows containers are used. Your container instances\n\t\t\trequire at least version 1.26.0 of the container agent to use a container stop timeout\n\t\t\tvalue. However, we recommend using the latest container agent version. For information\n\t\t\tabout checking your agent version and updating to the latest version, see Updating the Amazon ECS Container Agent in the Amazon Elastic Container Service Developer Guide. If you're using\n\t\t\tan Amazon ECS-optimized Linux AMI, your instance needs at least version 1.26.0-1 of the\n\t\t\t\tecs-init
package. If your container instances are launched from version\n\t\t\t\t20190301
or later, then they contain the required versions of the\n\t\t\tcontainer agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
The hostname to use for your container. This parameter maps to Hostname
\n\t\t\tin the Create a container section of the Docker Remote API and the\n\t\t\t\t--hostname
option to docker\n\t\t\t\trun.
The hostname
parameter is not supported if you're using the\n\t\t\t\t\tawsvpc
network mode.
The hostname to use for your container. This parameter maps to Hostname
\n\t\t\tin the Create a container section of the Docker Remote API and the\n\t\t\t\t--hostname
option to docker\n\t\t\t\trun.
The hostname
parameter is not supported if you're using the\n\t\t\t\t\tawsvpc
network mode.
The user to use inside the container. This parameter maps to User
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--user
option to docker\n\t\t\trun.
When running tasks using the host
network mode, don't run containers\n\t\t\t\tusing the root user (UID 0). We recommend using a non-root user for better\n\t\t\t\tsecurity.
You can specify the user
using the following formats. If specifying a UID\n\t\t\tor GID, you must specify it as a positive integer.
\n user
\n
\n user:group
\n
\n uid
\n
\n uid:gid
\n
\n user:gid
\n
\n uid:group
\n
This parameter is not supported for Windows containers.
\nThe user to use inside the container. This parameter maps to User
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--user
option to docker\n\t\t\trun.
When running tasks using the host
network mode, don't run containers\n\t\t\t\tusing the root user (UID 0). We recommend using a non-root user for better\n\t\t\t\tsecurity.
You can specify the user
using the following formats. If specifying a UID\n\t\t\tor GID, you must specify it as a positive integer.
\n user
\n
\n user:group
\n
\n uid
\n
\n uid:gid
\n
\n user:gid
\n
\n uid:group
\n
This parameter is not supported for Windows containers.
\nWhen this parameter is true, networking is disabled within the container. This\n\t\t\tparameter maps to NetworkDisabled
in the Create a container\n\t\t\tsection of the Docker Remote API.
This parameter is not supported for Windows containers.
\nWhen this parameter is true, networking is disabled within the container. This\n\t\t\tparameter maps to NetworkDisabled
in the Create a container\n\t\t\tsection of the Docker Remote API.
This parameter is not supported for Windows containers.
\nWhen this parameter is true, the container is given elevated privileges on the host\n\t\t\tcontainer instance (similar to the root
user). This parameter maps to\n\t\t\t\tPrivileged
in the Create a container section of the\n\t\t\tDocker Remote API and the --privileged
option to docker run.
This parameter is not supported for Windows containers or tasks run on Fargate.
\nWhen this parameter is true, the container is given elevated privileges on the host\n\t\t\tcontainer instance (similar to the root
user). This parameter maps to\n\t\t\t\tPrivileged
in the Create a container section of the\n\t\t\tDocker Remote API and the --privileged
option to docker run.
This parameter is not supported for Windows containers or tasks run on Fargate.
\nWhen this parameter is true, the container is given read-only access to its root file\n\t\t\tsystem. This parameter maps to ReadonlyRootfs
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--read-only
option to docker\n\t\t\t\trun.
This parameter is not supported for Windows containers.
\nWhen this parameter is true, the container is given read-only access to its root file\n\t\t\tsystem. This parameter maps to ReadonlyRootfs
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--read-only
option to docker\n\t\t\t\trun.
This parameter is not supported for Windows containers.
\nA list of DNS servers that are presented to the container. This parameter maps to\n\t\t\t\tDns
in the Create a container section of the\n\t\t\tDocker Remote API and the --dns
option to docker run.
This parameter is not supported for Windows containers.
\nA list of DNS servers that are presented to the container. This parameter maps to\n\t\t\t\tDns
in the Create a container section of the\n\t\t\tDocker Remote API and the --dns
option to docker run.
This parameter is not supported for Windows containers.
\nA list of DNS search domains that are presented to the container. This parameter maps\n\t\t\tto DnsSearch
in the Create a container section of the\n\t\t\tDocker Remote API and the --dns-search
option to docker run.
This parameter is not supported for Windows containers.
\nA list of DNS search domains that are presented to the container. This parameter maps\n\t\t\tto DnsSearch
in the Create a container section of the\n\t\t\tDocker Remote API and the --dns-search
option to docker run.
This parameter is not supported for Windows containers.
\nA list of hostnames and IP address mappings to append to the /etc/hosts
\n\t\t\tfile on the container. This parameter maps to ExtraHosts
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--add-host
option to docker\n\t\t\t\trun.
This parameter isn't supported for Windows containers or tasks that use the\n\t\t\t\t\tawsvpc
network mode.
A list of hostnames and IP address mappings to append to the /etc/hosts
\n\t\t\tfile on the container. This parameter maps to ExtraHosts
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--add-host
option to docker\n\t\t\t\trun.
This parameter isn't supported for Windows containers or tasks that use the\n\t\t\t\t\tawsvpc
network mode.
A list of strings to provide custom labels for SELinux and AppArmor multi-level\n\t\t\tsecurity systems. This field isn't valid for containers in tasks using the\n\t\t\tFargate launch type.
\nWith Windows containers, this parameter can be used to reference a credential spec\n\t\t\tfile when configuring a container for Active Directory authentication. For more\n\t\t\tinformation, see Using gMSAs for Windows\n\t\t\t\tContainers in the Amazon Elastic Container Service Developer Guide.
\nThis parameter maps to SecurityOpt
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--security-opt
option to docker\n\t\t\t\trun.
The Amazon ECS container agent running on a container instance must register with the\n\t\t\t\t\tECS_SELINUX_CAPABLE=true
or ECS_APPARMOR_CAPABLE=true
\n\t\t\t\tenvironment variables before containers placed on that instance can use these\n\t\t\t\tsecurity options. For more information, see Amazon ECS Container\n\t\t\t\t\tAgent Configuration in the Amazon Elastic Container Service Developer Guide.
For more information about valid values, see Docker\n\t\t\t\tRun Security Configuration.
\nValid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" |\n\t\t\t\"credentialspec:CredentialSpecFilePath\"
" + "smithy.api#documentation": "A list of strings to provide custom labels for SELinux and AppArmor multi-level\n\t\t\tsecurity systems. This field isn't valid for containers in tasks using the\n\t\t\tFargate launch type.
\n\t\tWith Windows containers, this parameter can be used to reference a credential spec\n\t\t\tfile when configuring a container for Active Directory authentication. For more\n\t\t\tinformation, see Using gMSAs for Windows\n\t\t\t\tContainers in the Amazon Elastic Container Service Developer Guide.
\n\t\tThis parameter maps to SecurityOpt
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--security-opt
option to docker\n\t\t\t\trun.
The Amazon ECS container agent running on a container instance must register with the\n\t\t\t\t\tECS_SELINUX_CAPABLE=true
or ECS_APPARMOR_CAPABLE=true
\n\t\t\t\tenvironment variables before containers placed on that instance can use these\n\t\t\t\tsecurity options. For more information, see Amazon ECS Container\n\t\t\t\t\tAgent Configuration in the Amazon Elastic Container Service Developer Guide.
For more information about valid values, see Docker\n\t\t\t\tRun Security Configuration.
\n\t\tValid values: \"no-new-privileges\" | \"apparmor:PROFILE\" | \"label:value\" |\n\t\t\t\"credentialspec:CredentialSpecFilePath\"
" } }, "interactive": { @@ -3519,19 +3528,19 @@ "dockerLabels": { "target": "com.amazonaws.ecs#DockerLabelsMap", "traits": { - "smithy.api#documentation": "A key/value map of labels to add to the container. This parameter maps to\n\t\t\t\tLabels
in the Create a container section of the\n\t\t\tDocker Remote API and the --label
option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
\n
A key/value map of labels to add to the container. This parameter maps to\n\t\t\t\tLabels
in the Create a container section of the\n\t\t\tDocker Remote API and the --label
option to docker run. This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
\n
A list of ulimits
to set in the container. If a ulimit
value\n\t\t\tis specified in a task definition, it overrides the default values set by Docker. This\n\t\t\tparameter maps to Ulimits
in the Create a container section\n\t\t\tof the Docker Remote API and the --ulimit
option to docker run. Valid naming values are displayed\n\t\t\tin the Ulimit data type.
Amazon ECS tasks hosted on Fargate use the default\n\t\t\t\t\t\t\tresource limit values set by the operating system with the exception of\n\t\t\t\t\t\t\tthe nofile
resource limit parameter which Fargate\n\t\t\t\t\t\t\toverrides. The nofile
resource limit sets a restriction on\n\t\t\t\t\t\t\tthe number of open files that a container can use. The default\n\t\t\t\t\t\t\t\tnofile
soft limit is 1024
and hard limit\n\t\t\t\t\t\t\tis 4096
.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
\n
This parameter is not supported for Windows containers.
\nA list of ulimits
to set in the container. If a ulimit
value\n\t\t\tis specified in a task definition, it overrides the default values set by Docker. This\n\t\t\tparameter maps to Ulimits
in the Create a container section\n\t\t\tof the Docker Remote API and the --ulimit
option to docker run. Valid naming values are displayed\n\t\t\tin the Ulimit data type.
Amazon ECS tasks hosted on Fargate use the default\n\t\t\t\t\t\t\tresource limit values set by the operating system with the exception of\n\t\t\t\t\t\t\tthe nofile
 resource limit parameter, which Fargate\n\t\t\t\t\t\t\toverrides. The nofile
resource limit sets a restriction on\n\t\t\t\t\t\t\tthe number of open files that a container can use. The default\n\t\t\t\t\t\t\t\tnofile
soft limit is 1024
and hard limit\n\t\t\t\t\t\t\tis 4096
.
This parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
\n
This parameter is not supported for Windows containers.
\nThe log configuration specification for the container.
\nThis parameter maps to LogConfig
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--log-driver
option to docker\n\t\t\t\trun. By default, containers use the same logging driver that the Docker\n\t\t\tdaemon uses. However the container can use a different logging driver than the Docker\n\t\t\tdaemon by specifying a log driver with this parameter in the container definition. To\n\t\t\tuse a different logging driver for a container, the log system must be configured\n\t\t\tproperly on the container instance (or on a different log server for remote logging\n\t\t\toptions). For more information about the options for different supported log drivers,\n\t\t\tsee Configure\n\t\t\t\tlogging drivers in the Docker documentation.
Amazon ECS currently supports a subset of the logging drivers available to the Docker\n\t\t\t\tdaemon (shown in the LogConfiguration data type). Additional log\n\t\t\t\tdrivers may be available in future releases of the Amazon ECS container agent.
\nThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
\n
The Amazon ECS container agent running on a container instance must register the\n\t\t\t\tlogging drivers available on that instance with the\n\t\t\t\t\tECS_AVAILABLE_LOGGING_DRIVERS
environment variable before\n\t\t\t\tcontainers placed on that instance can use these log configuration options. For more\n\t\t\t\tinformation, see Amazon ECS Container\n\t\t\t\t\tAgent Configuration in the Amazon Elastic Container Service Developer Guide.
The log configuration specification for the container.
\n\t\tThis parameter maps to LogConfig
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--log-driver
option to docker\n\t\t\t\trun. By default, containers use the same logging driver that the Docker\n\t\t\tdaemon uses. However the container can use a different logging driver than the Docker\n\t\t\tdaemon by specifying a log driver with this parameter in the container definition. To\n\t\t\tuse a different logging driver for a container, the log system must be configured\n\t\t\tproperly on the container instance (or on a different log server for remote logging\n\t\t\toptions). For more information about the options for different supported log drivers,\n\t\t\tsee Configure\n\t\t\t\tlogging drivers in the Docker documentation.
Amazon ECS currently supports a subset of the logging drivers available to the Docker\n\t\t\t\tdaemon (shown in the LogConfiguration data type). Additional log\n\t\t\t\tdrivers may be available in future releases of the Amazon ECS container agent.
\n\t\tThis parameter requires version 1.18 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
\n
The Amazon ECS container agent running on a container instance must register the\n\t\t\t\tlogging drivers available on that instance with the\n\t\t\t\t\tECS_AVAILABLE_LOGGING_DRIVERS
environment variable before\n\t\t\t\tcontainers placed on that instance can use these log configuration options. For more\n\t\t\t\tinformation, see Amazon ECS Container\n\t\t\t\t\tAgent Configuration in the Amazon Elastic Container Service Developer Guide.
A list of namespaced kernel parameters to set in the container. This parameter maps to\n\t\t\t\tSysctls
in the Create a container section of the\n\t\t\tDocker Remote API and the --sysctl
option to docker run.
We don't recommended that you specify network-related systemControls
\n\t\t\t\tparameters for multiple containers in a single task that also uses either the\n\t\t\t\t\tawsvpc
or host
network modes. For tasks that use the\n\t\t\t\t\tawsvpc
network mode, the container that's started last determines\n\t\t\t\twhich systemControls
parameters take effect. For tasks that use the\n\t\t\t\t\thost
network mode, it changes the container instance's namespaced\n\t\t\t\tkernel parameters as well as the containers.
A list of namespaced kernel parameters to set in the container. This parameter maps to\n\t\t\t\tSysctls
in the Create a container section of the\n\t\t\tDocker Remote API and the --sysctl
option to docker run.
We don't recommend that you specify network-related systemControls
\n\t\t\t\tparameters for multiple containers in a single task that also uses either the\n\t\t\t\t\tawsvpc
or host
network modes. For tasks that use the\n\t\t\t\t\tawsvpc
network mode, the container that's started last determines\n\t\t\t\twhich systemControls
parameters take effect. For tasks that use the\n\t\t\t\t\thost
network mode, it changes the container instance's namespaced\n\t\t\t\tkernel parameters as well as the containers.
The dependency condition of the container. The following are the available conditions\n\t\t\tand their behavior:
\n\n START
- This condition emulates the behavior of links and\n\t\t\t\t\tvolumes today. It validates that a dependent container is started before\n\t\t\t\t\tpermitting other containers to start.
\n COMPLETE
- This condition validates that a dependent\n\t\t\t\t\tcontainer runs to completion (exits) before permitting other containers to\n\t\t\t\t\tstart. This can be useful for nonessential containers that run a script and then\n\t\t\t\t\texit. This condition can't be set on an essential container.
\n SUCCESS
- This condition is the same as\n\t\t\t\t\t\tCOMPLETE
, but it also requires that the container exits with a\n\t\t\t\t\t\tzero
status. This condition can't be set on an essential\n\t\t\t\t\tcontainer.
\n HEALTHY
- This condition validates that the dependent\n\t\t\t\t\tcontainer passes its Docker health check before permitting other containers to\n\t\t\t\t\tstart. This requires that the dependent container has health checks configured.\n\t\t\t\t\tThis condition is confirmed only at task startup.
The dependency condition of the container. The following are the available conditions\n\t\t\tand their behavior:
\n\t\t\n START
- This condition emulates the behavior of links and\n\t\t\t\t\tvolumes today. It validates that a dependent container is started before\n\t\t\t\t\tpermitting other containers to start.
\n COMPLETE
- This condition validates that a dependent\n\t\t\t\t\tcontainer runs to completion (exits) before permitting other containers to\n\t\t\t\t\tstart. This can be useful for nonessential containers that run a script and then\n\t\t\t\t\texit. This condition can't be set on an essential container.
\n SUCCESS
- This condition is the same as\n\t\t\t\t\t\tCOMPLETE
, but it also requires that the container exits with a\n\t\t\t\t\t\tzero
status. This condition can't be set on an essential\n\t\t\t\t\tcontainer.
\n HEALTHY
- This condition validates that the dependent\n\t\t\t\t\tcontainer passes its Docker health check before permitting other containers to\n\t\t\t\t\tstart. This requires that the dependent container has health checks configured.\n\t\t\t\t\tThis condition is confirmed only at task startup.
The dependencies defined for container startup and shutdown. A container can contain\n\t\t\tmultiple dependencies. When a dependency is defined for container startup, for container\n\t\t\tshutdown it is reversed.
\nYour Amazon ECS container instances require at least version 1.26.0 of the container agent\n\t\t\tto use container dependencies. However, we recommend using the latest container agent\n\t\t\tversion. For information about checking your agent version and updating to the latest\n\t\t\tversion, see Updating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI,\n\t\t\tyour instance needs at least version 1.26.0-1 of the ecs-init
package. If\n\t\t\tyour container instances are launched from version 20190301
or later, then\n\t\t\tthey contain the required versions of the container agent and ecs-init
. For\n\t\t\tmore information, see Amazon ECS-optimized Linux AMI\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
For tasks that use the Fargate launch type, the task or service\n\t\t\t\trequires the following platforms:
\nLinux platform version 1.3.0
or later.
Windows platform version 1.0.0
or later.
The dependencies defined for container startup and shutdown. A container can contain\n\t\t\tmultiple dependencies. When a dependency is defined for container startup, for container\n\t\t\tshutdown it is reversed.
\n\t\tYour Amazon ECS container instances require at least version 1.26.0 of the container agent\n\t\t\tto use container dependencies. However, we recommend using the latest container agent\n\t\t\tversion. For information about checking your agent version and updating to the latest\n\t\t\tversion, see Updating the Amazon ECS\n\t\t\t\tContainer Agent in the Amazon Elastic Container Service Developer Guide. If you're using an Amazon ECS-optimized Linux AMI,\n\t\t\tyour instance needs at least version 1.26.0-1 of the ecs-init
package. If\n\t\t\tyour container instances are launched from version 20190301
or later, then\n\t\t\tthey contain the required versions of the container agent and ecs-init
. For\n\t\t\tmore information, see Amazon ECS-optimized Linux AMI\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
For tasks that use the Fargate launch type, the task or service\n\t\t\t\trequires the following platforms:
\n\t\t\tLinux platform version 1.3.0
or later.
Windows platform version 1.0.0
or later.
The status of the container instance. The valid values are REGISTERING
,\n\t\t\t\tREGISTRATION_FAILED
, ACTIVE
, INACTIVE
,\n\t\t\t\tDEREGISTERING
, or DRAINING
.
If your account has opted in to the awsvpcTrunking
account setting, then\n\t\t\tany newly registered container instance will transition to a REGISTERING
\n\t\t\tstatus while the trunk elastic network interface is provisioned for the instance. If the\n\t\t\tregistration fails, the instance will transition to a REGISTRATION_FAILED
\n\t\t\tstatus. You can describe the container instance and see the reason for failure in the\n\t\t\t\tstatusReason
parameter. Once the container instance is terminated, the\n\t\t\tinstance transitions to a DEREGISTERING
status while the trunk elastic\n\t\t\tnetwork interface is deprovisioned. The instance then transitions to an\n\t\t\t\tINACTIVE
status.
The ACTIVE
status indicates that the container instance can accept tasks.\n\t\t\tThe DRAINING
indicates that new tasks aren't placed on the container\n\t\t\tinstance and any service tasks running on the container instance are removed if\n\t\t\tpossible. For more information, see Container instance draining in the\n\t\t\tAmazon Elastic Container Service Developer Guide.
The status of the container instance. The valid values are REGISTERING
,\n\t\t\t\tREGISTRATION_FAILED
, ACTIVE
, INACTIVE
,\n\t\t\t\tDEREGISTERING
, or DRAINING
.
If your account has opted in to the awsvpcTrunking
account setting, then\n\t\t\tany newly registered container instance will transition to a REGISTERING
\n\t\t\tstatus while the trunk elastic network interface is provisioned for the instance. If the\n\t\t\tregistration fails, the instance will transition to a REGISTRATION_FAILED
\n\t\t\tstatus. You can describe the container instance and see the reason for failure in the\n\t\t\t\tstatusReason
parameter. Once the container instance is terminated, the\n\t\t\tinstance transitions to a DEREGISTERING
status while the trunk elastic\n\t\t\tnetwork interface is deprovisioned. The instance then transitions to an\n\t\t\t\tINACTIVE
status.
The ACTIVE
status indicates that the container instance can accept tasks.\n\t\t\tThe DRAINING
indicates that new tasks aren't placed on the container\n\t\t\tinstance and any service tasks running on the container instance are removed if\n\t\t\tpossible. For more information, see Container instance draining in the\n\t\t\tAmazon Elastic Container Service Developer Guide.
The metadata that you apply to the container instance to help you categorize and\n\t\t\torganize them. Each tag consists of a key and an optional value. You define both.
\nThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The metadata that you apply to the container instance to help you categorize and\n\t\t\torganize them. Each tag consists of a key and an optional value. You define both.
\n\t\tThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
Creates a new capacity provider. Capacity providers are associated with an Amazon ECS\n\t\t\tcluster and are used in capacity provider strategies to facilitate cluster auto\n\t\t\tscaling.
\nOnly capacity providers that use an Auto Scaling group can be created. Amazon ECS tasks on\n\t\t\tFargate use the FARGATE
and FARGATE_SPOT
capacity providers.\n\t\t\tThese providers are available to all accounts in the Amazon Web Services Regions that Fargate\n\t\t\tsupports.
Creates a new capacity provider. Capacity providers are associated with an Amazon ECS\n\t\t\tcluster and are used in capacity provider strategies to facilitate cluster auto\n\t\t\tscaling.
\n\t\tOnly capacity providers that use an Auto Scaling group can be created. Amazon ECS tasks on\n\t\t\tFargate use the FARGATE
and FARGATE_SPOT
capacity providers.\n\t\t\tThese providers are available to all accounts in the Amazon Web Services Regions that Fargate\n\t\t\tsupports.
The metadata that you apply to the capacity provider to categorize and organize them\n\t\t\tmore conveniently. Each tag consists of a key and an optional value. You define both of\n\t\t\tthem.
\nThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The metadata that you apply to the capacity provider to categorize and organize them\n\t\t\tmore conveniently. Each tag consists of a key and an optional value. You define both of\n\t\t\tthem.
\n\t\tThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
Creates a new Amazon ECS cluster. By default, your account receives a default
\n\t\t\tcluster when you launch your first container instance. However, you can create your own\n\t\t\tcluster with a unique name with the CreateCluster
action.
When you call the CreateCluster API operation, Amazon ECS attempts to\n\t\t\t\tcreate the Amazon ECS service-linked role for your account. This is so that it can manage\n\t\t\t\trequired resources in other Amazon Web Services services on your behalf. However, if the IAM user\n\t\t\t\tthat makes the call doesn't have permissions to create the service-linked role, it\n\t\t\t\tisn't created. For more information, see Using\n\t\t\t\t\tservice-linked roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
\nCreates a new Amazon ECS cluster. By default, your account receives a default
\n\t\t\tcluster when you launch your first container instance. However, you can create your own\n\t\t\tcluster with a unique name with the CreateCluster
action.
When you call the CreateCluster API operation, Amazon ECS attempts to\n\t\t\t\tcreate the Amazon ECS service-linked role for your account. This is so that it can manage\n\t\t\t\trequired resources in other Amazon Web Services services on your behalf. However, if the IAM user\n\t\t\t\tthat makes the call doesn't have permissions to create the service-linked role, it\n\t\t\t\tisn't created. For more information, see Using\n\t\t\t\t\tservice-linked roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
\n\t\tThe metadata that you apply to the cluster to help you categorize and organize them.\n\t\t\tEach tag consists of a key and an optional value. You define both.
\nThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The metadata that you apply to the cluster to help you categorize and organize them.\n\t\t\tEach tag consists of a key and an optional value. You define both.
\n\t\tThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The short name of one or more capacity providers to associate with the cluster. A\n\t\t\tcapacity provider must be associated with a cluster before it can be included as part of\n\t\t\tthe default capacity provider strategy of the cluster or used in a capacity provider\n\t\t\tstrategy when calling the CreateService or RunTask\n\t\t\tactions.
\nIf specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must be created but not associated with another cluster. New Auto Scaling group\n\t\t\tcapacity providers can be created with the CreateCapacityProvider API\n\t\t\toperation.
\nTo use a Fargate capacity provider, specify either the FARGATE
or\n\t\t\t\tFARGATE_SPOT
capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be\n\t\t\tused.
The PutClusterCapacityProviders API operation is used to update the\n\t\t\tlist of available capacity providers for a cluster after the cluster is created.
" + "smithy.api#documentation": "The short name of one or more capacity providers to associate with the cluster. A\n\t\t\tcapacity provider must be associated with a cluster before it can be included as part of\n\t\t\tthe default capacity provider strategy of the cluster or used in a capacity provider\n\t\t\tstrategy when calling the CreateService or RunTask\n\t\t\tactions.
\n\t\tIf specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must be created but not associated with another cluster. New Auto Scaling group\n\t\t\tcapacity providers can be created with the CreateCapacityProvider API\n\t\t\toperation.
\n\t\tTo use a Fargate capacity provider, specify either the FARGATE
or\n\t\t\t\tFARGATE_SPOT
capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be\n\t\t\tused.
The PutClusterCapacityProviders API operation is used to update the\n\t\t\tlist of available capacity providers for a cluster after the cluster is created.
" } }, "defaultCapacityProviderStrategy": { "target": "com.amazonaws.ecs#CapacityProviderStrategy", "traits": { - "smithy.api#documentation": "The capacity provider strategy to set as the default for the cluster. After a default\n\t\t\tcapacity provider strategy is set for a cluster, when you call the RunTask or CreateService APIs with no capacity\n\t\t\tprovider strategy or launch type specified, the default capacity provider strategy for\n\t\t\tthe cluster is used.
\nIf a default capacity provider strategy isn't defined for a cluster when it was\n\t\t\tcreated, it can be defined later with the PutClusterCapacityProviders\n\t\t\tAPI operation.
" + "smithy.api#documentation": "The capacity provider strategy to set as the default for the cluster. After a default\n\t\t\tcapacity provider strategy is set for a cluster, when you call the RunTask or CreateService APIs with no capacity\n\t\t\tprovider strategy or launch type specified, the default capacity provider strategy for\n\t\t\tthe cluster is used.
\n\t\tIf a default capacity provider strategy isn't defined for a cluster when it was\n\t\t\tcreated, it can be defined later with the PutClusterCapacityProviders\n\t\t\tAPI operation.
" } }, "serviceConnectDefaults": { "target": "com.amazonaws.ecs#ClusterServiceConnectDefaultsRequest", "traits": { - "smithy.api#documentation": "Use this parameter to set a default Service Connect namespace. After you set a default \n\tService Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as\n\tclient services in the namespace. This setting only applies to new services that set the enabled
parameter to\n\ttrue
in the ServiceConnectConfiguration
.\n\tYou can set the namespace of each service individually in the ServiceConnectConfiguration
to override this default\n\tparameter.
Tasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "Use this parameter to set a default Service Connect namespace. After you set a default \n\tService Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as\n\tclient services in the namespace. This setting only applies to new services that set the enabled
parameter to\n\ttrue
in the ServiceConnectConfiguration
.\n\tYou can set the namespace of each service individually in the ServiceConnectConfiguration
to override this default\n\tparameter.
Tasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" } } } @@ -4109,7 +4118,7 @@ } ], "traits": { - "smithy.api#documentation": "Runs and maintains your desired number of tasks from a specified task definition. If\n\t\t\tthe number of tasks running in a service drops below the desiredCount
,\n\t\t\tAmazon ECS runs another copy of the task in the specified cluster. To update an existing\n\t\t\tservice, see the UpdateService action.
In addition to maintaining the desired count of tasks in your service, you can\n\t\t\toptionally run your service behind one or more load balancers. The load balancers\n\t\t\tdistribute traffic across the tasks that are associated with the service. For more\n\t\t\tinformation, see Service load balancing in the Amazon Elastic Container Service Developer Guide.
\nTasks for services that don't use a load balancer are considered healthy if they're in\n\t\t\tthe RUNNING
state. Tasks for services that use a load balancer are\n\t\t\tconsidered healthy if they're in the RUNNING
state and are reported as\n\t\t\thealthy by the load balancer.
There are two service scheduler strategies available:
\n\n REPLICA
- The replica scheduling strategy places and\n\t\t\t\t\tmaintains your desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement decisions. For\n\t\t\t\t\tmore information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
\n DAEMON
- The daemon scheduling strategy deploys exactly one\n\t\t\t\t\ttask on each active container instance that meets all of the task placement\n\t\t\t\t\tconstraints that you specify in your cluster. The service scheduler also\n\t\t\t\t\tevaluates the task placement constraints for running tasks. It also stops tasks\n\t\t\t\t\tthat don't meet the placement constraints. When using this strategy, you don't\n\t\t\t\t\tneed to specify a desired number of tasks, a task placement strategy, or use\n\t\t\t\t\tService Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
You can optionally specify a deployment configuration for your service. The deployment\n\t\t\tis initiated by changing properties. For example, the deployment might be initiated by\n\t\t\tthe task definition or by your desired count of a service. This is done with an UpdateService operation. The default value for a replica service for\n\t\t\t\tminimumHealthyPercent
is 100%. The default value for a daemon service\n\t\t\tfor minimumHealthyPercent
is 0%.
If a service uses the ECS
deployment controller, the minimum healthy\n\t\t\tpercent represents a lower limit on the number of tasks in a service that must remain in\n\t\t\tthe RUNNING
state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of your desired number of tasks (rounded up to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING
state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can deploy without using additional cluster capacity. For example, if you\n\t\t\tset your service to have desired number of four tasks and a minimum healthy percent of\n\t\t\t50%, the scheduler might stop two existing tasks to free up cluster capacity before\n\t\t\tstarting two new tasks. If they're in the RUNNING
state, tasks for services\n\t\t\tthat don't use a load balancer are considered healthy . If they're in the\n\t\t\t\tRUNNING
state and reported as healthy by the load balancer, tasks for\n\t\t\tservices that do use a load balancer are considered healthy . The\n\t\t\tdefault value for minimum healthy percent is 100%.
If a service uses the ECS
deployment controller, the maximum percent parameter represents an upper limit on the\n\t\t\tnumber of tasks in a service that are allowed in the RUNNING
or\n\t\t\t\tPENDING
state during a deployment. Specifically, it represents it as a\n\t\t\tpercentage of the desired number of tasks (rounded down to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING
state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can define the deployment batch size. For example, if your service has a\n\t\t\tdesired number of four tasks and a maximum percent value of 200%, the scheduler may\n\t\t\tstart four new tasks before stopping the four older tasks (provided that the cluster\n\t\t\tresources required to do this are available). The default value for maximum percent is\n\t\t\t200%.
If a service uses either the CODE_DEPLOY
or EXTERNAL
\n\t\t\tdeployment controller types and tasks that use the EC2 launch type, the\n\t\t\t\tminimum healthy percent and maximum percent values are used only to define the lower and upper limit\n\t\t\ton the number of the tasks in the service that remain in the RUNNING
state.\n\t\t\tThis is while the container instances are in the DRAINING
state. If the\n\t\t\ttasks in the service use the Fargate launch type, the minimum healthy\n\t\t\tpercent and maximum percent values aren't used. This is the case even if they're\n\t\t\tcurrently visible when describing your service.
When creating a service that uses the EXTERNAL
deployment controller, you\n\t\t\tcan specify only parameters that aren't controlled at the task set level. The only\n\t\t\trequired parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
When the service scheduler launches new tasks, it determines task placement. For\n\t\t\tinformation about task placement and task placement strategies, see Amazon ECS\n\t\t\t\ttask placement in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "Runs and maintains your desired number of tasks from a specified task definition. If\n\t\t\tthe number of tasks running in a service drops below the desiredCount
,\n\t\t\tAmazon ECS runs another copy of the task in the specified cluster. To update an existing\n\t\t\tservice, see the UpdateService action.
In addition to maintaining the desired count of tasks in your service, you can\n\t\t\toptionally run your service behind one or more load balancers. The load balancers\n\t\t\tdistribute traffic across the tasks that are associated with the service. For more\n\t\t\tinformation, see Service load balancing in the Amazon Elastic Container Service Developer Guide.
\n\t\tTasks for services that don't use a load balancer are considered healthy if they're in\n\t\t\tthe RUNNING
state. Tasks for services that use a load balancer are\n\t\t\tconsidered healthy if they're in the RUNNING
state and are reported as\n\t\t\thealthy by the load balancer.
There are two service scheduler strategies available:
\n\t\t\n REPLICA
- The replica scheduling strategy places and\n\t\t\t\t\tmaintains your desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement decisions. For\n\t\t\t\t\tmore information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
\n DAEMON
- The daemon scheduling strategy deploys exactly one\n\t\t\t\t\ttask on each active container instance that meets all of the task placement\n\t\t\t\t\tconstraints that you specify in your cluster. The service scheduler also\n\t\t\t\t\tevaluates the task placement constraints for running tasks. It also stops tasks\n\t\t\t\t\tthat don't meet the placement constraints. When using this strategy, you don't\n\t\t\t\t\tneed to specify a desired number of tasks, a task placement strategy, or use\n\t\t\t\t\tService Auto Scaling policies. For more information, see Service scheduler concepts in the Amazon Elastic Container Service Developer Guide.
You can optionally specify a deployment configuration for your service. The deployment\n\t\t\tis initiated by changing properties. For example, the deployment might be initiated by\n\t\t\tupdating the task definition or the desired count of a service. This is done with an UpdateService operation. The default value for a replica service for\n\t\t\t\tminimumHealthyPercent
is 100%. The default value for a daemon service\n\t\t\tfor minimumHealthyPercent
is 0%.
If a service uses the ECS
deployment controller, the minimum healthy\n\t\t\tpercent represents a lower limit on the number of tasks in a service that must remain in\n\t\t\tthe RUNNING
state during a deployment. Specifically, it is expressed as a\n\t\t\tpercentage of your desired number of tasks (rounded up to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING
state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can deploy without using additional cluster capacity. For example, if you\n\t\t\tset your service to have a desired number of four tasks and a minimum healthy percent of\n\t\t\t50%, the scheduler might stop two existing tasks to free up cluster capacity before\n\t\t\tstarting two new tasks. If they're in the RUNNING
state, tasks for services\n\t\t\tthat don't use a load balancer are considered healthy. If they're in the\n\t\t\t\tRUNNING
state and reported as healthy by the load balancer, tasks for\n\t\t\tservices that do use a load balancer are considered healthy. The\n\t\t\tdefault value for minimum healthy percent is 100%.
If a service uses the ECS
deployment controller, the maximum percent parameter represents an upper limit on the\n\t\t\tnumber of tasks in a service that are allowed in the RUNNING
or\n\t\t\t\tPENDING
state during a deployment. Specifically, it is expressed as a\n\t\t\tpercentage of the desired number of tasks (rounded down to the nearest integer). This\n\t\t\thappens when any of your container instances are in the DRAINING
state if\n\t\t\tthe service contains tasks using the EC2 launch type. Using this\n\t\t\tparameter, you can define the deployment batch size. For example, if your service has a\n\t\t\tdesired number of four tasks and a maximum percent value of 200%, the scheduler may\n\t\t\tstart four new tasks before stopping the four older tasks (provided that the cluster\n\t\t\tresources required to do this are available). The default value for maximum percent is\n\t\t\t200%.
If a service uses either the CODE_DEPLOY
or EXTERNAL
\n\t\t\tdeployment controller types and tasks that use the EC2 launch type, the\n\t\t\t\tminimum healthy percent and maximum percent values are used only to define the lower and upper limit\n\t\t\ton the number of the tasks in the service that remain in the RUNNING
state.\n\t\t\tThis is while the container instances are in the DRAINING
state. If the\n\t\t\ttasks in the service use the Fargate launch type, the minimum healthy\n\t\t\tpercent and maximum percent values aren't used. This is the case even if they're\n\t\t\tcurrently visible when describing your service.
When creating a service that uses the EXTERNAL
deployment controller, you\n\t\t\tcan specify only parameters that aren't controlled at the task set level. The only\n\t\t\trequired parameter is the service name. You control your services using the CreateTaskSet operation. For more information, see Amazon ECS deployment types in the Amazon Elastic Container Service Developer Guide.
When the service scheduler launches new tasks, it determines task placement. For\n\t\t\tinformation about task placement and task placement strategies, see Amazon ECS\n\t\t\t\ttask placement in the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#CreateServiceRequest": { @@ -4131,25 +4140,25 @@ "taskDefinition": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The family
and revision
(family:revision
) or\n\t\t\tfull ARN of the task definition to run in your service. If a revision
\n\t\t\tisn't specified, the latest ACTIVE
revision is used.
A task definition must be specified if the service uses either the ECS
or\n\t\t\t\tCODE_DEPLOY
deployment controllers.
The family
and revision
(family:revision
) or\n\t\t\tfull ARN of the task definition to run in your service. If a revision
\n\t\t\tisn't specified, the latest ACTIVE
revision is used.
A task definition must be specified if the service uses either the ECS
or\n\t\t\t\tCODE_DEPLOY
deployment controllers.
A load balancer object representing the load balancers to use with your service. For\n\t\t\tmore information, see Service load balancing in the Amazon Elastic Container Service Developer Guide.
\nIf the service uses the rolling update (ECS
) deployment controller and\n\t\t\tusing either an Application Load Balancer or Network Load Balancer, you must specify one or more target group ARNs to attach\n\t\t\tto the service. The service-linked role is required for services that use multiple\n\t\t\ttarget groups. For more information, see Using service-linked roles for Amazon ECS in the\n\t\t\tAmazon Elastic Container Service Developer Guide.
If the service uses the CODE_DEPLOY
deployment controller, the service is\n\t\t\trequired to use either an Application Load Balancer or Network Load Balancer. When creating an CodeDeploy deployment group, you\n\t\t\tspecify two target groups (referred to as a targetGroupPair
). During a\n\t\t\tdeployment, CodeDeploy determines which task set in your service has the status\n\t\t\t\tPRIMARY
, and it associates one target group with it. Then, it also\n\t\t\tassociates the other target group with the replacement task set. The load balancer can\n\t\t\talso have up to two listeners: a required listener for production traffic and an\n\t\t\toptional listener that you can use to perform validation tests with Lambda functions\n\t\t\tbefore routing production traffic to it.
If you use the CODE_DEPLOY
deployment controller, these values can be\n\t\t\tchanged when updating the service.
For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN,\n\t\t\tthe container name, and the container port to access from the load balancer. The\n\t\t\tcontainer name must be as it appears in a container definition. The load balancer name\n\t\t\tparameter must be omitted. When a task from this service is placed on a container\n\t\t\tinstance, the container instance and port combination is registered as a target in the\n\t\t\ttarget group that's specified here.
\nFor Classic Load Balancers, this object must contain the load balancer name, the container name , and\n\t\t\tthe container port to access from the load balancer. The container name must be as it\n\t\t\tappears in a container definition. The target group ARN parameter must be omitted.\n\t\t\tWhen a task from this service is placed on a container instance, the container instance\n\t\t\tis registered with the load balancer that's specified here.
\nServices with tasks that use the awsvpc
network mode (for example, those\n\t\t\twith the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers\n\t\t\taren't supported. Also, when you create any target groups for these services, you must\n\t\t\tchoose ip
as the target type, not instance
. This is because\n\t\t\ttasks that use the awsvpc
network mode are associated with an elastic\n\t\t\tnetwork interface, not an Amazon EC2 instance.
A load balancer object representing the load balancers to use with your service. For\n\t\t\tmore information, see Service load balancing in the Amazon Elastic Container Service Developer Guide.
\n\t\tIf the service uses the rolling update (ECS
) deployment controller and\n\t\t\tis using either an Application Load Balancer or Network Load Balancer, you must specify one or more target group ARNs to attach\n\t\t\tto the service. The service-linked role is required for services that use multiple\n\t\t\ttarget groups. For more information, see Using service-linked roles for Amazon ECS in the\n\t\t\tAmazon Elastic Container Service Developer Guide.
If the service uses the CODE_DEPLOY
deployment controller, the service is\n\t\t\trequired to use either an Application Load Balancer or Network Load Balancer. When creating a CodeDeploy deployment group, you\n\t\t\tspecify two target groups (referred to as a targetGroupPair
). During a\n\t\t\tdeployment, CodeDeploy determines which task set in your service has the status\n\t\t\t\tPRIMARY
, and it associates one target group with it. Then, it also\n\t\t\tassociates the other target group with the replacement task set. The load balancer can\n\t\t\talso have up to two listeners: a required listener for production traffic and an\n\t\t\toptional listener that you can use to perform validation tests with Lambda functions\n\t\t\tbefore routing production traffic to it.
If you use the CODE_DEPLOY
deployment controller, these values can be\n\t\t\tchanged when updating the service.
For Application Load Balancers and Network Load Balancers, this object must contain the load balancer target group ARN,\n\t\t\tthe container name, and the container port to access from the load balancer. The\n\t\t\tcontainer name must be as it appears in a container definition. The load balancer name\n\t\t\tparameter must be omitted. When a task from this service is placed on a container\n\t\t\tinstance, the container instance and port combination is registered as a target in the\n\t\t\ttarget group that's specified here.
\n\t\tFor Classic Load Balancers, this object must contain the load balancer name, the container name, and\n\t\t\tthe container port to access from the load balancer. The container name must be as it\n\t\t\tappears in a container definition. The target group ARN parameter must be omitted.\n\t\t\tWhen a task from this service is placed on a container instance, the container instance\n\t\t\tis registered with the load balancer that's specified here.
\n\t\tServices with tasks that use the awsvpc
network mode (for example, those\n\t\t\twith the Fargate launch type) only support Application Load Balancers and Network Load Balancers. Classic Load Balancers\n\t\t\taren't supported. Also, when you create any target groups for these services, you must\n\t\t\tchoose ip
as the target type, not instance
. This is because\n\t\t\ttasks that use the awsvpc
network mode are associated with an elastic\n\t\t\tnetwork interface, not an Amazon EC2 instance.
The details of the service discovery registry to associate with this service. For more\n\t\t\tinformation, see Service\n\t\t\t\tdiscovery.
\nEach service may be associated with one service registry. Multiple service\n\t\t\t\tregistries for each service isn't supported.
\nThe details of the service discovery registry to associate with this service. For more\n\t\t\tinformation, see Service\n\t\t\t\tdiscovery.
\n\t\tEach service may be associated with one service registry. Multiple service\n\t\t\t\tregistries for each service aren't supported.
\n\t\tThe number of instantiations of the specified task definition to place and keep\n\t\t\trunning on your cluster.
\nThis is required if schedulingStrategy
is REPLICA
or isn't\n\t\t\tspecified. If schedulingStrategy
is DAEMON
then this isn't\n\t\t\trequired.
The number of instantiations of the specified task definition to place and keep\n\t\t\trunning on your cluster.
\n\t\tThis is required if schedulingStrategy
is REPLICA
or isn't\n\t\t\tspecified. If schedulingStrategy
is DAEMON
then this isn't\n\t\t\trequired.
The infrastructure that you run your service on. For more information, see Amazon ECS\n\t\t\t\tlaunch types in the Amazon Elastic Container Service Developer Guide.
\nThe FARGATE
launch type runs your tasks on Fargate On-Demand\n\t\t\tinfrastructure.
Fargate Spot infrastructure is available for use but a capacity provider\n\t\t\t\tstrategy must be used. For more information, see Fargate capacity providers in the\n\t\t\t\t\tAmazon ECS User Guide for Fargate.
\nThe EC2
launch type runs your tasks on Amazon EC2 instances registered to your\n\t\t\tcluster.
The EXTERNAL
launch type runs your tasks on your on-premises server or\n\t\t\tvirtual machine (VM) capacity registered to your cluster.
A service can use either a launch type or a capacity provider strategy. If a\n\t\t\t\tlaunchType
is specified, the capacityProviderStrategy
\n\t\t\tparameter must be omitted.
The infrastructure that you run your service on. For more information, see Amazon ECS\n\t\t\t\tlaunch types in the Amazon Elastic Container Service Developer Guide.
\n\t\tThe FARGATE
launch type runs your tasks on Fargate On-Demand\n\t\t\tinfrastructure.
Fargate Spot infrastructure is available for use but a capacity provider\n\t\t\t\tstrategy must be used. For more information, see Fargate capacity providers in the\n\t\t\t\t\tAmazon ECS User Guide for Fargate.
\n\t\tThe EC2
launch type runs your tasks on Amazon EC2 instances registered to your\n\t\t\tcluster.
The EXTERNAL
launch type runs your tasks on your on-premises server or\n\t\t\tvirtual machine (VM) capacity registered to your cluster.
A service can use either a launch type or a capacity provider strategy. If a\n\t\t\t\tlaunchType
is specified, the capacityProviderStrategy
\n\t\t\tparameter must be omitted.
The capacity provider strategy to use for the service.
\nIf a capacityProviderStrategy
is specified, the launchType
\n\t\t\tparameter must be omitted. If no capacityProviderStrategy
or\n\t\t\t\tlaunchType
is specified, the\n\t\t\t\tdefaultCapacityProviderStrategy
for the cluster is used.
A capacity provider strategy may contain a maximum of 6 capacity providers.
" + "smithy.api#documentation": "The capacity provider strategy to use for the service.
\n\t\tIf a capacityProviderStrategy
is specified, the launchType
\n\t\t\tparameter must be omitted. If no capacityProviderStrategy
or\n\t\t\t\tlaunchType
is specified, the\n\t\t\t\tdefaultCapacityProviderStrategy
for the cluster is used.
A capacity provider strategy may contain a maximum of 6 capacity providers.
" } }, "platformVersion": { @@ -4179,7 +4188,7 @@ "role": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your\n\t\t\tload balancer on your behalf. This parameter is only permitted if you are using a load\n\t\t\tbalancer with your service and your task definition doesn't use the awsvpc
\n\t\t\tnetwork mode. If you specify the role
parameter, you must also specify a\n\t\t\tload balancer object with the loadBalancers
parameter.
If your account has already created the Amazon ECS service-linked role, that role is\n\t\t\t\tused for your service unless you specify a role here. The service-linked role is\n\t\t\t\trequired if your task definition uses the awsvpc
network mode or if the\n\t\t\t\tservice is configured to use service discovery, an external deployment controller,\n\t\t\t\tmultiple target groups, or Elastic Inference accelerators in which case you don't\n\t\t\t\tspecify a role here. For more information, see Using\n\t\t\t\t\tservice-linked roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
If your specified role has a path other than /
, then you must either\n\t\t\tspecify the full role ARN (this is recommended) or prefix the role name with the path.\n\t\t\tFor example, if a role with the name bar
has a path of /foo/
\n\t\t\tthen you would specify /foo/bar
as the role name. For more information, see\n\t\t\t\tFriendly names and paths in the IAM User Guide.
The name or full Amazon Resource Name (ARN) of the IAM role that allows Amazon ECS to make calls to your\n\t\t\tload balancer on your behalf. This parameter is only permitted if you are using a load\n\t\t\tbalancer with your service and your task definition doesn't use the awsvpc
\n\t\t\tnetwork mode. If you specify the role
parameter, you must also specify a\n\t\t\tload balancer object with the loadBalancers
parameter.
If your account has already created the Amazon ECS service-linked role, that role is\n\t\t\t\tused for your service unless you specify a role here. The service-linked role is\n\t\t\t\trequired if your task definition uses the awsvpc
network mode or if the\n\t\t\t\tservice is configured to use service discovery, an external deployment controller,\n\t\t\t\tmultiple target groups, or Elastic Inference accelerators, in which case you don't\n\t\t\t\tspecify a role here. For more information, see Using\n\t\t\t\t\tservice-linked roles for Amazon ECS in the Amazon Elastic Container Service Developer Guide.
If your specified role has a path other than /
, then you must either\n\t\t\tspecify the full role ARN (this is recommended) or prefix the role name with the path.\n\t\t\tFor example, if a role with the name bar
has a path of /foo/
\n\t\t\tthen you would specify /foo/bar
as the role name. For more information, see\n\t\t\t\tFriendly names and paths in the IAM User Guide.
The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy\n\t\t\tElastic Load Balancing target health checks after a task has first started. This is only used when your\n\t\t\tservice is configured to use a load balancer. If your service has a load balancer\n\t\t\tdefined and you don't specify a health check grace period value, the default value of\n\t\t\t\t0
is used.
If you do not use an Elastic Load Balancing, we recommend that you use the startPeriod
in\n\t\t\tthe task definition health check parameters. For more information, see Health\n\t\t\t\tcheck.
If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you\n\t\t\tcan specify a health check grace period of up to\n\t\t\t2,147,483,647\n\t\t\tseconds (about 69 years). During that time, the Amazon ECS service\n\t\t\tscheduler ignores health check status. This grace period can prevent the service\n\t\t\tscheduler from marking tasks as unhealthy and stopping them before they have time to\n\t\t\tcome up.
" + "smithy.api#documentation": "The period of time, in seconds, that the Amazon ECS service scheduler ignores unhealthy\n\t\t\tElastic Load Balancing target health checks after a task has first started. This is only used when your\n\t\t\tservice is configured to use a load balancer. If your service has a load balancer\n\t\t\tdefined and you don't specify a health check grace period value, the default value of\n\t\t\t\t0
is used.
If you do not use Elastic Load Balancing, we recommend that you use the startPeriod
in\n\t\t\tthe task definition health check parameters. For more information, see Health\n\t\t\t\tcheck.
If your service's tasks take a while to start and respond to Elastic Load Balancing health checks, you\n\t\t\tcan specify a health check grace period of up to\n\t\t\t2,147,483,647\n\t\t\tseconds (about 69 years). During that time, the Amazon ECS service\n\t\t\tscheduler ignores health check status. This grace period can prevent the service\n\t\t\tscheduler from marking tasks as unhealthy and stopping them before they have time to\n\t\t\tcome up.
" } }, "schedulingStrategy": { "target": "com.amazonaws.ecs#SchedulingStrategy", "traits": { - "smithy.api#documentation": "The scheduling strategy to use for the service. For more information, see Services.
\nThere are two service scheduler strategies available:
\n\n REPLICA
-The replica scheduling strategy places and\n\t\t\t\t\tmaintains the desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement decisions. This\n\t\t\t\t\tscheduler strategy is required if the service uses the CODE_DEPLOY
\n\t\t\t\t\tor EXTERNAL
deployment controller types.
\n DAEMON
-The daemon scheduling strategy deploys exactly one\n\t\t\t\t\ttask on each active container instance that meets all of the task placement\n\t\t\t\t\tconstraints that you specify in your cluster. The service scheduler also\n\t\t\t\t\tevaluates the task placement constraints for running tasks and will stop tasks\n\t\t\t\t\tthat don't meet the placement constraints. When you're using this strategy, you\n\t\t\t\t\tdon't need to specify a desired number of tasks, a task placement strategy, or\n\t\t\t\t\tuse Service Auto Scaling policies.
Tasks using the Fargate launch type or the\n\t\t\t\t\t\t\tCODE_DEPLOY
or EXTERNAL
deployment controller\n\t\t\t\t\t\ttypes don't support the DAEMON
scheduling strategy.
The scheduling strategy to use for the service. For more information, see Services.
\n\t\tThere are two service scheduler strategies available:
\n\t\t\n REPLICA
- The replica scheduling strategy places and\n\t\t\t\t\tmaintains the desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement decisions. This\n\t\t\t\t\tscheduler strategy is required if the service uses the CODE_DEPLOY
\n\t\t\t\t\tor EXTERNAL
deployment controller types.
\n DAEMON
- The daemon scheduling strategy deploys exactly one\n\t\t\t\t\ttask on each active container instance that meets all of the task placement\n\t\t\t\t\tconstraints that you specify in your cluster. The service scheduler also\n\t\t\t\t\tevaluates the task placement constraints for running tasks and will stop tasks\n\t\t\t\t\tthat don't meet the placement constraints. When you're using this strategy, you\n\t\t\t\t\tdon't need to specify a desired number of tasks, a task placement strategy, or\n\t\t\t\t\tuse Service Auto Scaling policies.
Tasks using the Fargate launch type or the\n\t\t\t\t\t\t\tCODE_DEPLOY
or EXTERNAL
deployment controller\n\t\t\t\t\t\ttypes don't support the DAEMON
scheduling strategy.
The metadata that you apply to the service to help you categorize and organize them.\n\t\t\tEach tag consists of a key and an optional value, both of which you define. When a\n\t\t\tservice is deleted, the tags are deleted as well.
\nThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The metadata that you apply to the service to help you categorize and organize it.\n\t\t\tEach tag consists of a key and an optional value, both of which you define. When a\n\t\t\tservice is deleted, the tags are deleted as well.
\n\t\tThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The configuration for this service to discover and connect to\n\tservices, and be discovered by, and connected from, other services within a namespace.
\nTasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "The configuration for this service to discover and connect to\n\tservices, and be discovered by, and connected from, other services within a namespace.
\nTasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" } } } @@ -4264,7 +4273,7 @@ "service": { "target": "com.amazonaws.ecs#Service", "traits": { - "smithy.api#documentation": "The full description of your service following the create call.
\nA service will return either a capacityProviderStrategy
or\n\t\t\t\tlaunchType
parameter, but not both, depending where one was specified\n\t\t\twhen it was created.
If a service is using the ECS
deployment controller, the\n\t\t\t\tdeploymentController
and taskSets
parameters will not be\n\t\t\treturned.
if the service uses the CODE_DEPLOY
deployment controller, the\n\t\t\t\tdeploymentController
, taskSets
and\n\t\t\t\tdeployments
parameters will be returned, however the\n\t\t\t\tdeployments
parameter will be an empty list.
The full description of your service following the create call.
\n\t\tA service will return either a capacityProviderStrategy
or\n\t\t\t\tlaunchType
parameter, but not both, depending on where one was specified\n\t\t\twhen it was created.
If a service is using the ECS
deployment controller, the\n\t\t\t\tdeploymentController
and taskSets
parameters will not be\n\t\t\treturned.
If the service uses the CODE_DEPLOY
deployment controller, the\n\t\t\t\tdeploymentController
, taskSets
and\n\t\t\t\tdeployments
parameters will be returned. However, the\n\t\t\t\tdeployments
parameter will be an empty list.
The launch type that new tasks in the task set uses. For more information, see Amazon ECS\n\t\t\t\tlaunch types in the Amazon Elastic Container Service Developer Guide.
\nIf a launchType
is specified, the capacityProviderStrategy
\n\t\t\tparameter must be omitted.
The launch type that new tasks in the task set uses. For more information, see Amazon ECS\n\t\t\t\tlaunch types in the Amazon Elastic Container Service Developer Guide.
\n\t\tIf a launchType
is specified, the capacityProviderStrategy
\n\t\t\tparameter must be omitted.
The capacity provider strategy to use for the task set.
\nA capacity provider strategy consists of one or more capacity providers along with the\n\t\t\t\tbase
and weight
to assign to them. A capacity provider\n\t\t\tmust be associated with the cluster to be used in a capacity provider strategy. The\n\t\t\t\tPutClusterCapacityProviders API is used to associate a capacity\n\t\t\tprovider with a cluster. Only capacity providers with an ACTIVE
or\n\t\t\t\tUPDATING
status can be used.
If a capacityProviderStrategy
is specified, the launchType
\n\t\t\tparameter must be omitted. If no capacityProviderStrategy
or\n\t\t\t\tlaunchType
is specified, the\n\t\t\t\tdefaultCapacityProviderStrategy
for the cluster is used.
If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.
\nTo use a Fargate capacity provider, specify either the FARGATE
or\n\t\t\t\tFARGATE_SPOT
capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be\n\t\t\tused.
The PutClusterCapacityProviders API operation is used to update the\n\t\t\tlist of available capacity providers for a cluster after the cluster is created.
" + "smithy.api#documentation": "The capacity provider strategy to use for the task set.
\n\t\tA capacity provider strategy consists of one or more capacity providers along with the\n\t\t\t\tbase
and weight
to assign to them. A capacity provider\n\t\t\tmust be associated with the cluster to be used in a capacity provider strategy. The\n\t\t\t\tPutClusterCapacityProviders API is used to associate a capacity\n\t\t\tprovider with a cluster. Only capacity providers with an ACTIVE
or\n\t\t\t\tUPDATING
status can be used.
If a capacityProviderStrategy
is specified, the launchType
\n\t\t\tparameter must be omitted. If no capacityProviderStrategy
or\n\t\t\t\tlaunchType
is specified, the\n\t\t\t\tdefaultCapacityProviderStrategy
for the cluster is used.
If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.
\n\t\tTo use a Fargate capacity provider, specify either the FARGATE
or\n\t\t\t\tFARGATE_SPOT
capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be\n\t\t\tused.
The PutClusterCapacityProviders API operation is used to update the\n\t\t\tlist of available capacity providers for a cluster after the cluster is created.
" } }, "platformVersion": { @@ -4397,7 +4406,7 @@ "tags": { "target": "com.amazonaws.ecs#Tags", "traits": { - "smithy.api#documentation": "The metadata that you apply to the task set to help you categorize and organize them.\n\t\t\tEach tag consists of a key and an optional value. You define both. When a service is\n\t\t\tdeleted, the tags are deleted.
\nThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The metadata that you apply to the task set to help you categorize and organize it.\n\t\t\tEach tag consists of a key and an optional value. You define both. When a service is\n\t\t\tdeleted, the tags are deleted.
\n\t\tThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
Deletes the specified capacity provider.
\nThe FARGATE
and FARGATE_SPOT
capacity providers are\n\t\t\t\treserved and can't be deleted. You can disassociate them from a cluster using either\n\t\t\t\tthe PutClusterCapacityProviders API or by deleting the\n\t\t\t\tcluster.
Prior to a capacity provider being deleted, the capacity provider must be removed from\n\t\t\tthe capacity provider strategy from all services. The UpdateService\n\t\t\tAPI can be used to remove a capacity provider from a service's capacity provider\n\t\t\tstrategy. When updating a service, the forceNewDeployment
option can be\n\t\t\tused to ensure that any tasks using the Amazon EC2 instance capacity provided by the capacity\n\t\t\tprovider are transitioned to use the capacity from the remaining capacity providers.\n\t\t\tOnly capacity providers that aren't associated with a cluster can be deleted. To remove\n\t\t\ta capacity provider from a cluster, you can either use PutClusterCapacityProviders or delete the cluster.
Deletes the specified capacity provider.
\n\t\tThe FARGATE
and FARGATE_SPOT
capacity providers are\n\t\t\t\treserved and can't be deleted. You can disassociate them from a cluster using either\n\t\t\t\tthe PutClusterCapacityProviders API or by deleting the\n\t\t\t\tcluster.
Prior to a capacity provider being deleted, the capacity provider must be removed from\n\t\t\tthe capacity provider strategy of all services. The UpdateService\n\t\t\tAPI can be used to remove a capacity provider from a service's capacity provider\n\t\t\tstrategy. When updating a service, the forceNewDeployment
option can be\n\t\t\tused to ensure that any tasks using the Amazon EC2 instance capacity provided by the capacity\n\t\t\tprovider are transitioned to use the capacity from the remaining capacity providers.\n\t\t\tOnly capacity providers that aren't associated with a cluster can be deleted. To remove\n\t\t\ta capacity provider from a cluster, you can either use PutClusterCapacityProviders or delete the cluster.
Deletes the specified cluster. The cluster transitions to the INACTIVE
\n\t\t\tstate. Clusters with an INACTIVE
status might remain discoverable in your\n\t\t\taccount for a period of time. However, this behavior is subject to change in the future.\n\t\t\tWe don't recommend that you rely on INACTIVE
clusters persisting.
You must deregister all container instances from this cluster before you may delete\n\t\t\tit. You can list the container instances in a cluster with ListContainerInstances and deregister them with DeregisterContainerInstance.
" + "smithy.api#documentation": "Deletes the specified cluster. The cluster transitions to the INACTIVE
\n\t\t\tstate. Clusters with an INACTIVE
status might remain discoverable in your\n\t\t\taccount for a period of time. However, this behavior is subject to change in the future.\n\t\t\tWe don't recommend that you rely on INACTIVE
clusters persisting.
You must deregister all container instances from this cluster before you may delete\n\t\t\tit. You can list the container instances in a cluster with ListContainerInstances and deregister them with DeregisterContainerInstance.
" } }, "com.amazonaws.ecs#DeleteClusterRequest": { @@ -4650,7 +4659,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a specified service within a cluster. You can delete a service if you have no\n\t\t\trunning tasks in it and the desired task count is zero. If the service is actively\n\t\t\tmaintaining tasks, you can't delete it, and you must update the service to a desired\n\t\t\ttask count of zero. For more information, see UpdateService.
\nWhen you delete a service, if there are still running tasks that require cleanup,\n\t\t\t\tthe service status moves from ACTIVE
to DRAINING
, and the\n\t\t\t\tservice is no longer visible in the console or in the ListServices\n\t\t\t\tAPI operation. After all tasks have transitioned to either STOPPING
or\n\t\t\t\t\tSTOPPED
status, the service status moves from DRAINING
\n\t\t\t\tto INACTIVE
. Services in the DRAINING
or\n\t\t\t\t\tINACTIVE
status can still be viewed with the DescribeServices API operation. However, in the future,\n\t\t\t\t\tINACTIVE
services may be cleaned up and purged from Amazon ECS record\n\t\t\t\tkeeping, and DescribeServices calls on those services return a\n\t\t\t\t\tServiceNotFoundException
error.
If you attempt to create a new service with the same name as an existing service\n\t\t\t\tin either ACTIVE
or DRAINING
status, you receive an\n\t\t\t\terror.
Deletes a specified service within a cluster. You can delete a service if you have no\n\t\t\trunning tasks in it and the desired task count is zero. If the service is actively\n\t\t\tmaintaining tasks, you can't delete it, and you must update the service to a desired\n\t\t\ttask count of zero. For more information, see UpdateService.
\n\t\tWhen you delete a service, if there are still running tasks that require cleanup,\n\t\t\t\tthe service status moves from ACTIVE
to DRAINING
, and the\n\t\t\t\tservice is no longer visible in the console or in the ListServices\n\t\t\t\tAPI operation. After all tasks have transitioned to either STOPPING
or\n\t\t\t\t\tSTOPPED
status, the service status moves from DRAINING
\n\t\t\t\tto INACTIVE
. Services in the DRAINING
or\n\t\t\t\t\tINACTIVE
status can still be viewed with the DescribeServices API operation. However, in the future,\n\t\t\t\t\tINACTIVE
services may be cleaned up and purged from Amazon ECS record\n\t\t\t\tkeeping, and DescribeServices calls on those services return a\n\t\t\t\t\tServiceNotFoundException
error.
If you attempt to create a new service with the same name as an existing service\n\t\t\t\tin either ACTIVE
or DRAINING
status, you receive an\n\t\t\t\terror.
The status of the deployment. The following describes each state.
\nThe most recent deployment of a service.
\nA service deployment that still has running tasks, but are in the process\n\t\t\t\t\t\tof being replaced with a new PRIMARY
deployment.
A deployment that has been completely replaced.
\nThe status of the deployment. The following describes each state.
\n\t\tThe most recent deployment of a service.
\n\t\t\t\tA service deployment that still has running tasks, but is in the process\n\t\t\t\t\t\tof being replaced with a new PRIMARY
deployment.
A deployment that has been completely replaced.
\n\t\t\t\tThe number of consecutively failed tasks in the deployment. A task is considered a\n\t\t\tfailure if the service scheduler can't launch the task, the task doesn't transition to a\n\t\t\t\tRUNNING
state, or if it fails any of its defined health checks and is\n\t\t\tstopped.
Once a service deployment has one or more successfully running tasks, the failed\n\t\t\t\ttask count resets to zero and stops being evaluated.
\nThe number of consecutively failed tasks in the deployment. A task is considered a\n\t\t\tfailure if the service scheduler can't launch the task, the task doesn't transition to a\n\t\t\t\tRUNNING
state, or if it fails any of its defined health checks and is\n\t\t\tstopped.
Once a service deployment has one or more successfully running tasks, the failed\n\t\t\t\ttask count resets to zero and stops being evaluated.
\n\t\tThe operating system that your tasks in the service, or tasks are running on. A\n\t\t\tplatform family is specified only for tasks using the Fargate launch type.
\n All tasks that run as part of this service must use the same\n\t\t\t\tplatformFamily
value as the service, for example, \n\t\t\tLINUX.
.
The operating system that your tasks in the service are running on. A\n\t\t\tplatform family is specified only for tasks using the Fargate launch type.
\n\t\t All tasks that run as part of this service must use the same\n\t\t\t\tplatformFamily
value as the service, for example, \n\t\t\tLINUX.
.
The rolloutState
of a service is only returned for services that use\n\t\t\t\tthe rolling update (ECS
) deployment type that aren't behind a\n\t\t\t\tClassic Load Balancer.
The rollout state of the deployment. When a service deployment is started, it begins\n\t\t\tin an IN_PROGRESS
state. When the service reaches a steady state, the\n\t\t\tdeployment transitions to a COMPLETED
state. If the service fails to reach\n\t\t\ta steady state and circuit breaker is enabled, the deployment transitions to a\n\t\t\t\tFAILED
state. A deployment in FAILED
state doesn't launch\n\t\t\tany new tasks. For more information, see DeploymentCircuitBreaker.
The rolloutState
of a service is only returned for services that use\n\t\t\t\tthe rolling update (ECS
) deployment type that aren't behind a\n\t\t\t\tClassic Load Balancer.
The rollout state of the deployment. When a service deployment is started, it begins\n\t\t\tin an IN_PROGRESS
state. When the service reaches a steady state, the\n\t\t\tdeployment transitions to a COMPLETED
state. If the service fails to reach\n\t\t\ta steady state and circuit breaker is enabled, the deployment transitions to a\n\t\t\t\tFAILED
state. A deployment in FAILED
state doesn't launch\n\t\t\tany new tasks. For more information, see DeploymentCircuitBreaker.
The details of the Service Connect configuration that's used by this deployment.\n\t\t\tCompare the configuration between multiple deployments when troubleshooting issues with\n\t\t\tnew deployments.
\nThe configuration for this service to discover and connect to\n\tservices, and be discovered by, and connected from, other services within a namespace.
\nTasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "The details of the Service Connect configuration that's used by this deployment.\n\t\t\tCompare the configuration between multiple deployments when troubleshooting issues with\n\t\t\tnew deployments.
\n\t\tThe configuration for this service to discover and connect to\n\tservices, and be discovered by, and connected from, other services within a namespace.
\nTasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" } }, "serviceConnectResources": { @@ -4892,37 +4901,6 @@ "smithy.api#documentation": "The details of an Amazon ECS service deployment. This is used only when a service uses the\n\t\t\t\tECS
deployment controller type.
One or more CloudWatch alarm names. Use a \",\" to separate the alarms.
", - "smithy.api#required": {} - } - }, - "enable": { - "target": "com.amazonaws.ecs#Boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "Determines whether to use the CloudWatch alarm option in the service deployment process.
", - "smithy.api#required": {} - } - }, - "rollback": { - "target": "com.amazonaws.ecs#Boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "Determines whether to configure Amazon ECS to roll back the service if a service deployment\n\t\t\tfails. If rollback is used, when a service deployment fails, the service is rolled back\n\t\t\tto the last deployment that completed successfully.
", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "One of the methods which provide a way for you to quickly identify when a deployment\n\t\t\thas failed, and then to optionally roll back the failure to the last working\n\t\t\tdeployment.
\nWhen the alarms are generated, Amazon ECS sets the service deployment to failed. Set the rollback\n\t\t\tparameter to have Amazon ECS to roll back your service to the last completed deployment\n\t\t\tafter a failure.
\nYou can only use the DeploymentAlarms
method to detect failures when the\n\t\t\t\tDeploymentController
is set to ECS
(rolling\n\t\t\tupdate).
For more information, see Rolling\n\t\t\t\tupdate in the \n Amazon Elastic Container Service Developer Guide\n .
" - } - }, "com.amazonaws.ecs#DeploymentCircuitBreaker": { "type": "structure", "members": { @@ -4938,13 +4916,13 @@ "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "Determines whether to configure Amazon ECS to roll back the service if a service deployment\n\t\t\tfails. If rollback is on, when a service deployment fails, the service is rolled back to\n\t\t\tthe last deployment that completed successfully.
", + "smithy.api#documentation": "Determines whether to configure Amazon ECS to roll back the service if a service deployment\n\t\t\tfails. If rollback is enabled, when a service deployment fails, the service is rolled\n\t\t\tback to the last deployment that completed successfully.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "The deployment circuit breaker can only be used for services using the rolling\n\t\t\t\tupdate (ECS
) deployment type that aren't behind a Classic Load Balancer.
The deployment circuit breaker determines whether a\n\t\t\tservice deployment will fail if the service can't reach a steady state. If enabled, a\n\t\t\tservice deployment will transition to a failed state and stop launching new tasks. You\n\t\t\tcan also configure Amazon ECS to roll back your service to the last completed deployment\n\t\t\tafter a failure. For more information, see Rolling\n\t\t\t\tupdate in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "The deployment circuit breaker can only be used for services using the rolling\n\t\t\t\tupdate (ECS
) deployment type that aren't behind a Classic Load Balancer.
The deployment circuit breaker determines whether a\n\t\t\tservice deployment will fail if the service can't reach a steady state. If enabled, a\n\t\t\tservice deployment will transition to a failed state and stop launching new tasks. You\n\t\t\tcan also configure Amazon ECS to roll back your service to the last completed deployment\n\t\t\tafter a failure. For more information, see Rolling\n\t\t\t\tupdate in the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#DeploymentConfiguration": { @@ -4953,25 +4931,19 @@ "deploymentCircuitBreaker": { "target": "com.amazonaws.ecs#DeploymentCircuitBreaker", "traits": { - "smithy.api#documentation": "The deployment circuit breaker can only be used for services using the rolling\n\t\t\t\tupdate (ECS
) deployment type.
The deployment circuit breaker determines whether a\n\t\t\tservice deployment will fail if the service can't reach a steady state. If deployment\n\t\t\tcircuit breaker is enabled, a service deployment will transition to a failed state and\n\t\t\tstop launching new tasks. If rollback is enabled, when a service deployment fails, the\n\t\t\tservice is rolled back to the last deployment that completed successfully.
" + "smithy.api#documentation": "The deployment circuit breaker can only be used for services using the rolling\n\t\t\t\tupdate (ECS
) deployment type.
The deployment circuit breaker determines whether a\n\t\t\tservice deployment will fail if the service can't reach a steady state. If deployment\n\t\t\tcircuit breaker is enabled, a service deployment will transition to a failed state and\n\t\t\tstop launching new tasks. If rollback is enabled, when a service deployment fails, the\n\t\t\tservice is rolled back to the last deployment that completed successfully.
" } }, "maximumPercent": { "target": "com.amazonaws.ecs#BoxedInteger", "traits": { - "smithy.api#documentation": "If a service is using the rolling update (ECS
) deployment type, the\n\t\t\t\tmaximumPercent
parameter represents an upper limit on the number of\n\t\t\tyour service's tasks that are allowed in the RUNNING
or\n\t\t\t\tPENDING
state during a deployment, as a percentage of the\n\t\t\t\tdesiredCount
(rounded down to the nearest integer). This parameter\n\t\t\tenables you to define the deployment batch size. For example, if your service is using\n\t\t\tthe REPLICA
service scheduler and has a desiredCount
of four\n\t\t\ttasks and a maximumPercent
value of 200%, the scheduler may start four new\n\t\t\ttasks before stopping the four older tasks (provided that the cluster resources required\n\t\t\tto do this are available). The default maximumPercent
value for a service\n\t\t\tusing the REPLICA
service scheduler is 200%.
If a service is using either the blue/green (CODE_DEPLOY
) or\n\t\t\t\tEXTERNAL
deployment types and tasks that use the EC2\n\t\t\tlaunch type, the maximum percent value is set to the\n\t\t\tdefault value and is used to define the upper limit on the number of the tasks in the\n\t\t\tservice that remain in the RUNNING
state while the container instances are\n\t\t\tin the DRAINING
state. If the tasks in the service use the\n\t\t\tFargate launch type, the maximum percent value is not used, although it is\n\t\t\treturned when describing your service.
If a service is using the rolling update (ECS
) deployment type, the\n\t\t\t\tmaximumPercent
parameter represents an upper limit on the number of\n\t\t\tyour service's tasks that are allowed in the RUNNING
or\n\t\t\t\tPENDING
state during a deployment, as a percentage of the\n\t\t\t\tdesiredCount
(rounded down to the nearest integer). This parameter\n\t\t\tenables you to define the deployment batch size. For example, if your service is using\n\t\t\tthe REPLICA
service scheduler and has a desiredCount
of four\n\t\t\ttasks and a maximumPercent
value of 200%, the scheduler may start four new\n\t\t\ttasks before stopping the four older tasks (provided that the cluster resources required\n\t\t\tto do this are available). The default maximumPercent
value for a service\n\t\t\tusing the REPLICA
service scheduler is 200%.
If a service is using either the blue/green (CODE_DEPLOY
) or\n\t\t\t\tEXTERNAL
deployment types and tasks that use the EC2\n\t\t\tlaunch type, the maximum percent value is set to the\n\t\t\tdefault value and is used to define the upper limit on the number of the tasks in the\n\t\t\tservice that remain in the RUNNING
state while the container instances are\n\t\t\tin the DRAINING
state. If the tasks in the service use the\n\t\t\tFargate launch type, the maximum percent value is not used, although it is\n\t\t\treturned when describing your service.
If a service is using the rolling update (ECS
) deployment type, the\n\t\t\t\tminimumHealthyPercent
represents a lower limit on the number of your\n\t\t\tservice's tasks that must remain in the RUNNING
state during a deployment,\n\t\t\tas a percentage of the desiredCount
(rounded up to the nearest integer).\n\t\t\tThis parameter enables you to deploy without using additional cluster capacity. For\n\t\t\texample, if your service has a desiredCount
of four tasks and a\n\t\t\t\tminimumHealthyPercent
of 50%, the service scheduler may stop two\n\t\t\texisting tasks to free up cluster capacity before starting two new tasks.
For services that do not use a load balancer, the following\n\t\t\tshould be noted:
\nA service is considered healthy if all essential containers within the tasks\n\t\t\t\t\tin the service pass their health checks.
\nIf a task has no essential containers with a health check defined, the service\n\t\t\t\t\tscheduler will wait for 40 seconds after a task reaches a RUNNING
\n\t\t\t\t\tstate before the task is counted towards the minimum healthy percent\n\t\t\t\t\ttotal.
If a task has one or more essential containers with a health check defined,\n\t\t\t\t\tthe service scheduler will wait for the task to reach a healthy status before\n\t\t\t\t\tcounting it towards the minimum healthy percent total. A task is considered\n\t\t\t\t\thealthy when all essential containers within the task have passed their health\n\t\t\t\t\tchecks. The amount of time the service scheduler can wait for is determined by\n\t\t\t\t\tthe container health check settings.
\nFor services are that do use a load balancer, the following\n\t\t\tshould be noted:
\nIf a task has no essential containers with a health check defined, the service\n\t\t\t\t\tscheduler will wait for the load balancer target group health check to return a\n\t\t\t\t\thealthy status before counting the task towards the minimum healthy percent\n\t\t\t\t\ttotal.
\nIf a task has an essential container with a health check defined, the service\n\t\t\t\t\tscheduler will wait for both the task to reach a healthy status and the load\n\t\t\t\t\tbalancer target group health check to return a healthy status before counting\n\t\t\t\t\tthe task towards the minimum healthy percent total.
\nIf a service is using either the blue/green (CODE_DEPLOY
) or\n\t\t\t\tEXTERNAL
deployment types and is running tasks that use the\n\t\t\tEC2 launch type, the minimum healthy\n\t\t\t\tpercent value is set to the default value and is used to define the lower\n\t\t\tlimit on the number of the tasks in the service that remain in the RUNNING
\n\t\t\tstate while the container instances are in the DRAINING
state. If a service\n\t\t\tis using either the blue/green (CODE_DEPLOY
) or EXTERNAL
\n\t\t\tdeployment types and is running tasks that use the Fargate launch type,\n\t\t\tthe minimum healthy percent value is not used, although it is returned when describing\n\t\t\tyour service.
Information about the CloudWatch alarms.
" + "smithy.api#documentation": "If a service is using the rolling update (ECS
) deployment type, the\n\t\t\t\tminimumHealthyPercent
represents a lower limit on the number of your\n\t\t\tservice's tasks that must remain in the RUNNING
state during a deployment,\n\t\t\tas a percentage of the desiredCount
(rounded up to the nearest integer).\n\t\t\tThis parameter enables you to deploy without using additional cluster capacity. For\n\t\t\texample, if your service has a desiredCount
of four tasks and a\n\t\t\t\tminimumHealthyPercent
of 50%, the service scheduler may stop two\n\t\t\texisting tasks to free up cluster capacity before starting two new tasks.
For services that do not use a load balancer, the following\n\t\t\tshould be noted:
\n\t\tA service is considered healthy if all essential containers within the tasks\n\t\t\t\t\tin the service pass their health checks.
\n\t\t\tIf a task has no essential containers with a health check defined, the service\n\t\t\t\t\tscheduler will wait for 40 seconds after a task reaches a RUNNING
\n\t\t\t\t\tstate before the task is counted towards the minimum healthy percent\n\t\t\t\t\ttotal.
If a task has one or more essential containers with a health check defined,\n\t\t\t\t\tthe service scheduler will wait for the task to reach a healthy status before\n\t\t\t\t\tcounting it towards the minimum healthy percent total. A task is considered\n\t\t\t\t\thealthy when all essential containers within the task have passed their health\n\t\t\t\t\tchecks. The amount of time the service scheduler can wait for is determined by\n\t\t\t\t\tthe container health check settings.
\n\t\t\tFor services that do use a load balancer, the following\n\t\t\tshould be noted:
\n\t\tIf a task has no essential containers with a health check defined, the service\n\t\t\t\t\tscheduler will wait for the load balancer target group health check to return a\n\t\t\t\t\thealthy status before counting the task towards the minimum healthy percent\n\t\t\t\t\ttotal.
\n\t\t\tIf a task has an essential container with a health check defined, the service\n\t\t\t\t\tscheduler will wait for both the task to reach a healthy status and the load\n\t\t\t\t\tbalancer target group health check to return a healthy status before counting\n\t\t\t\t\tthe task towards the minimum healthy percent total.
\n\t\t\tIf a service is using either the blue/green (CODE_DEPLOY
) or\n\t\t\t\tEXTERNAL
deployment types and is running tasks that use the\n\t\t\tEC2 launch type, the minimum healthy\n\t\t\t\tpercent value is set to the default value and is used to define the lower\n\t\t\tlimit on the number of the tasks in the service that remain in the RUNNING
\n\t\t\tstate while the container instances are in the DRAINING
state. If a service\n\t\t\tis using either the blue/green (CODE_DEPLOY
) or EXTERNAL
\n\t\t\tdeployment types and is running tasks that use the Fargate launch type,\n\t\t\tthe minimum healthy percent value is not used, although it is returned when describing\n\t\t\tyour service.
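Not part of the synced model itself, but as a sketch of how these two limits are typically set through the SDK generated from this model (the AWS SDK for Kotlin). The client and request builder names, the region, and the cluster/service values are assumptions or placeholders; the field names come from the model above.

```kotlin
import aws.sdk.kotlin.services.ecs.EcsClient
import aws.sdk.kotlin.services.ecs.model.DeploymentConfiguration
import aws.sdk.kotlin.services.ecs.model.UpdateServiceRequest

// Sketch: keep at least half of desiredCount RUNNING during a rolling update and
// allow up to twice desiredCount to run while old tasks are replaced.
suspend fun main() {
    EcsClient { region = "us-east-1" }.use { ecs ->
        val request = UpdateServiceRequest {
            cluster = "default"        // placeholder cluster name
            service = "web"            // placeholder service name
            deploymentConfiguration = DeploymentConfiguration {
                minimumHealthyPercent = 50
                maximumPercent = 200
            }
        }
        ecs.updateService(request)
    }
}
```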
The deployment controller type to use.
\nThere are three deployment controller types available:
\nThe rolling update (ECS
) deployment type involves replacing\n\t\t\t\t\t\tthe current running version of the container with the latest version. The\n\t\t\t\t\t\tnumber of containers Amazon ECS adds or removes from the service during a rolling\n\t\t\t\t\t\tupdate is controlled by adjusting the minimum and maximum number of healthy\n\t\t\t\t\t\ttasks allowed during a service deployment, as specified in the DeploymentConfiguration.
The blue/green (CODE_DEPLOY
) deployment type uses the\n\t\t\t\t\t\tblue/green deployment model powered by CodeDeploy, which allows you to verify a\n\t\t\t\t\t\tnew deployment of a service before sending production traffic to it.
The external (EXTERNAL
) deployment type enables you to use\n\t\t\t\t\t\tany third-party deployment controller for full control over the deployment\n\t\t\t\t\t\tprocess for an Amazon ECS service.
The deployment controller type to use.
\n\t\tThere are three deployment controller types available:
\n\t\tThe rolling update (ECS
) deployment type involves replacing\n\t\t\t\t\t\tthe current running version of the container with the latest version. The\n\t\t\t\t\t\tnumber of containers Amazon ECS adds or removes from the service during a rolling\n\t\t\t\t\t\tupdate is controlled by adjusting the minimum and maximum number of healthy\n\t\t\t\t\t\ttasks allowed during a service deployment, as specified in the DeploymentConfiguration.
The blue/green (CODE_DEPLOY
) deployment type uses the\n\t\t\t\t\t\tblue/green deployment model powered by CodeDeploy, which allows you to verify a\n\t\t\t\t\t\tnew deployment of a service before sending production traffic to it.
The external (EXTERNAL
) deployment type enables you to use\n\t\t\t\t\t\tany third-party deployment controller for full control over the deployment\n\t\t\t\t\t\tprocess for an Amazon ECS service.
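For orientation only, a minimal sketch of choosing one of the three controllers when creating a service with the generated AWS SDK for Kotlin client. The type and builder names, the `DeploymentControllerType.Ecs` variant spelling, the region, and all cluster/service/task-definition values are assumptions or placeholders.

```kotlin
import aws.sdk.kotlin.services.ecs.EcsClient
import aws.sdk.kotlin.services.ecs.model.CreateServiceRequest
import aws.sdk.kotlin.services.ecs.model.DeploymentController
import aws.sdk.kotlin.services.ecs.model.DeploymentControllerType

// Sketch: create a service that uses the rolling update (ECS) deployment controller.
suspend fun main() {
    EcsClient { region = "us-east-1" }.use { ecs ->
        val request = CreateServiceRequest {
            cluster = "default"            // placeholder
            serviceName = "web"            // placeholder
            taskDefinition = "web:1"       // placeholder family:revision
            desiredCount = 2
            deploymentController = DeploymentController {
                type = DeploymentControllerType.Ecs   // assumed variant name for the ECS value
            }
        }
        ecs.createService(request)
    }
}
```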
Deregisters an Amazon ECS container instance from the specified cluster. This instance is\n\t\t\tno longer available to run tasks.
\nIf you intend to use the container instance for some other purpose after\n\t\t\tderegistration, we recommend that you stop all of the tasks running on the container\n\t\t\tinstance before deregistration. That prevents any orphaned tasks from consuming\n\t\t\tresources.
\nDeregistering a container instance removes the instance from a cluster, but it doesn't\n\t\t\tterminate the EC2 instance. If you are finished using the instance, be sure to terminate\n\t\t\tit in the Amazon EC2 console to stop billing.
\nIf you terminate a running container instance, Amazon ECS automatically deregisters the\n\t\t\t\tinstance from your cluster (stopped container instances or instances with\n\t\t\t\tdisconnected agents aren't automatically deregistered when terminated).
\nDeregisters an Amazon ECS container instance from the specified cluster. This instance is\n\t\t\tno longer available to run tasks.
\n\t\tIf you intend to use the container instance for some other purpose after\n\t\t\tderegistration, we recommend that you stop all of the tasks running on the container\n\t\t\tinstance before deregistration. That prevents any orphaned tasks from consuming\n\t\t\tresources.
\n\t\tDeregistering a container instance removes the instance from a cluster, but it doesn't\n\t\t\tterminate the EC2 instance. If you are finished using the instance, be sure to terminate\n\t\t\tit in the Amazon EC2 console to stop billing.
\n\t\tIf you terminate a running container instance, Amazon ECS automatically deregisters the\n\t\t\t\tinstance from your cluster (stopped container instances or instances with\n\t\t\t\tdisconnected agents aren't automatically deregistered when terminated).
\n\t\tForces the container instance to be deregistered. If you have tasks running on the\n\t\t\tcontainer instance when you deregister it with the force
option, these\n\t\t\ttasks remain running until you terminate the instance or the tasks stop through some\n\t\t\tother means, but they're orphaned (no longer monitored or accounted for by Amazon ECS). If an\n\t\t\torphaned task on your container instance is part of an Amazon ECS service, then the service\n\t\t\tscheduler starts another copy of that task, on a different container instance if\n\t\t\tpossible.
Any containers in orphaned service tasks that are registered with a Classic Load Balancer or an Application Load Balancer\n\t\t\ttarget group are deregistered. They begin connection draining according to the settings\n\t\t\ton the load balancer or target group.
" + "smithy.api#documentation": "Forces the container instance to be deregistered. If you have tasks running on the\n\t\t\tcontainer instance when you deregister it with the force
option, these\n\t\t\ttasks remain running until you terminate the instance or the tasks stop through some\n\t\t\tother means, but they're orphaned (no longer monitored or accounted for by Amazon ECS). If an\n\t\t\torphaned task on your container instance is part of an Amazon ECS service, then the service\n\t\t\tscheduler starts another copy of that task, on a different container instance if\n\t\t\tpossible.
Any containers in orphaned service tasks that are registered with a Classic Load Balancer or an Application Load Balancer\n\t\t\ttarget group are deregistered. They begin connection draining according to the settings\n\t\t\ton the load balancer or target group.
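A minimal sketch of the call described above, using the AWS SDK for Kotlin client generated from this model. The client and request type names, region, and the cluster/container-instance values are assumptions or placeholders; `force` is the parameter documented here.

```kotlin
import aws.sdk.kotlin.services.ecs.EcsClient
import aws.sdk.kotlin.services.ecs.model.DeregisterContainerInstanceRequest

// Sketch: force-deregister an instance; tasks still running on it become orphaned and,
// if they belong to a service, the scheduler replaces them elsewhere when possible.
suspend fun main() {
    EcsClient { region = "us-east-1" }.use { ecs ->
        val request = DeregisterContainerInstanceRequest {
            cluster = "default"                                       // placeholder
            containerInstance = "1c3be8ed50b143dd9d5b6c4e9a1e2f3b"    // placeholder ID or full ARN
            force = true
        }
        ecs.deregisterContainerInstance(request)
    }
}
```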
" } } } @@ -5127,7 +5099,7 @@ } ], "traits": { - "smithy.api#documentation": "Deregisters the specified task definition by family and revision. Upon deregistration,\n\t\t\tthe task definition is marked as INACTIVE
. Existing tasks and services that\n\t\t\treference an INACTIVE
task definition continue to run without disruption.\n\t\t\tExisting services that reference an INACTIVE
task definition can still\n\t\t\tscale up or down by modifying the service's desired count.
You can't use an INACTIVE
task definition to run new tasks or create new\n\t\t\tservices, and you can't update an existing service to reference an INACTIVE
\n\t\t\ttask definition. However, there may be up to a 10-minute window following deregistration\n\t\t\twhere these restrictions have not yet taken effect.
At this time, INACTIVE
task definitions remain discoverable in your\n\t\t\t\taccount indefinitely. However, this behavior is subject to change in the future. We\n\t\t\t\tdon't recommend that you rely on INACTIVE
task definitions persisting\n\t\t\t\tbeyond the lifecycle of any associated tasks and services.
Deregisters the specified task definition by family and revision. Upon deregistration,\n\t\t\tthe task definition is marked as INACTIVE
. Existing tasks and services that\n\t\t\treference an INACTIVE
task definition continue to run without disruption.\n\t\t\tExisting services that reference an INACTIVE
task definition can still\n\t\t\tscale up or down by modifying the service's desired count.
You can't use an INACTIVE
task definition to run new tasks or create new\n\t\t\tservices, and you can't update an existing service to reference an INACTIVE
\n\t\t\ttask definition. However, there may be up to a 10-minute window following deregistration\n\t\t\twhere these restrictions have not yet taken effect.
At this time, INACTIVE
task definitions remain discoverable in your\n\t\t\t\taccount indefinitely. However, this behavior is subject to change in the future. We\n\t\t\t\tdon't recommend that you rely on INACTIVE
task definitions persisting\n\t\t\t\tbeyond the lifecycle of any associated tasks and services.
The nextToken
value returned from a previous paginated\n\t\t\t\tDescribeCapacityProviders
request where maxResults
was\n\t\t\tused and the results exceeded the value of that parameter. Pagination continues from the\n\t\t\tend of the previous results that returned the nextToken
value.
This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe nextToken
value returned from a previous paginated\n\t\t\t\tDescribeCapacityProviders
request where maxResults
was\n\t\t\tused and the results exceeded the value of that parameter. Pagination continues from the\n\t\t\tend of the previous results that returned the nextToken
value.
This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.
\nDetermines whether to include additional information about the clusters in the\n\t\t\tresponse. If this field is omitted, this information isn't included.
\nIf ATTACHMENTS
is specified, the attachments for the container instances\n\t\t\tor tasks within the cluster are included, for example the capacity providers.
If SETTINGS
is specified, the settings for the cluster are\n\t\t\tincluded.
If CONFIGURATIONS
is specified, the configuration for the cluster is\n\t\t\tincluded.
If STATISTICS
is specified, the task and service count is included,\n\t\t\tseparated by launch type.
If TAGS
is specified, the metadata tags associated with the cluster are\n\t\t\tincluded.
Determines whether to include additional information about the clusters in the\n\t\t\tresponse. If this field is omitted, this information isn't included.
\n\t\tIf ATTACHMENTS
is specified, the attachments for the container instances\n\t\t\tor tasks within the cluster are included, for example the capacity providers.
If SETTINGS
is specified, the settings for the cluster are\n\t\t\tincluded.
If CONFIGURATIONS
is specified, the configuration for the cluster is\n\t\t\tincluded.
If STATISTICS
is specified, the task and service count is included,\n\t\t\tseparated by launch type.
If TAGS
is specified, the metadata tags associated with the cluster are\n\t\t\tincluded.
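A hedged sketch of passing the `include` list described above through the generated AWS SDK for Kotlin client. The `ClusterField.Statistics` and `ClusterField.Tags` variant spellings, the client/request names, the region, and the cluster name are assumptions or placeholders.

```kotlin
import aws.sdk.kotlin.services.ecs.EcsClient
import aws.sdk.kotlin.services.ecs.model.ClusterField
import aws.sdk.kotlin.services.ecs.model.DescribeClustersRequest

// Sketch: request the optional STATISTICS and TAGS sections along with the cluster.
suspend fun main() {
    EcsClient { region = "us-east-1" }.use { ecs ->
        val request = DescribeClustersRequest {
            clusters = listOf("default")   // placeholder cluster name
            include = listOf(ClusterField.Statistics, ClusterField.Tags) // assumed variant names
        }
        val response = ecs.describeClusters(request)
        response.clusters?.forEach { cluster ->
            println("${cluster.clusterName}: ${cluster.runningTasksCount} running tasks")
        }
    }
}
```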
Describes a task definition. You can specify a family
and\n\t\t\t\trevision
to find information about a specific task definition, or you\n\t\t\tcan simply specify the family to find the latest ACTIVE
revision in that\n\t\t\tfamily.
You can only describe INACTIVE
task definitions while an active task\n\t\t\t\tor service references them.
Describes a task definition. You can specify a family
and\n\t\t\t\trevision
to find information about a specific task definition, or you\n\t\t\tcan simply specify the family to find the latest ACTIVE
revision in that\n\t\t\tfamily.
You can only describe INACTIVE
task definitions while an active task\n\t\t\t\tor service references them.
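A minimal sketch of the family-only lookup described above, assuming the AWS SDK for Kotlin client generated from this model; the client/request names, region, and family name are assumptions or placeholders.

```kotlin
import aws.sdk.kotlin.services.ecs.EcsClient
import aws.sdk.kotlin.services.ecs.model.DescribeTaskDefinitionRequest

// Sketch: describing by family alone returns the latest ACTIVE revision;
// "family:revision" or a full ARN pins a specific revision instead.
suspend fun main() {
    EcsClient { region = "us-east-1" }.use { ecs ->
        val request = DescribeTaskDefinitionRequest {
            taskDefinition = "web"    // placeholder family name
        }
        val response = ecs.describeTaskDefinition(request)
        println(response.taskDefinition?.taskDefinitionArn)
    }
}
```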
The metadata that's applied to the task definition to help you categorize and organize\n\t\t\tthem. Each tag consists of a key and an optional value. You define both.
\nThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The metadata that's applied to the task definition to help you categorize and organize\n\t\t\tthem. Each tag consists of a key and an optional value. You define both.
\n\t\tThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
Describes a specified task or tasks.
\nCurrently, stopped tasks appear in the returned results for at least one hour.
", + "smithy.api#documentation": "Describes a specified task or tasks.
\n\t\tCurrently, stopped tasks appear in the returned results for at least one hour.
", "smithy.waiters#waitable": { "TasksRunning": { "acceptors": [ @@ -5856,7 +5828,7 @@ } ], "traits": { - "smithy.api#documentation": "This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
\nReturns an endpoint for the Amazon ECS agent to poll for updates.
" + "smithy.api#documentation": "This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
\nReturns an endpoint for the Amazon ECS agent to poll for updates.
" } }, "com.amazonaws.ecs#DiscoverPollEndpointRequest": { @@ -5920,7 +5892,7 @@ "autoprovision": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "If this value is true
, the Docker volume is created if it doesn't already\n\t\t\texist.
This field is only used if the scope
is shared
.
If this value is true
, the Docker volume is created if it doesn't already\n\t\t\texist.
This field is only used if the scope
is shared
.
The directory within the Amazon EFS file system to mount as the root directory inside the\n\t\t\thost. If this parameter is omitted, the root of the Amazon EFS volume will be used.\n\t\t\tSpecifying /
will have the same effect as omitting this parameter.
If an EFS access point is specified in the authorizationConfig
, the\n\t\t\t\troot directory parameter must either be omitted or set to /
which will\n\t\t\t\tenforce the path set on the EFS access point.
The directory within the Amazon EFS file system to mount as the root directory inside the\n\t\t\thost. If this parameter is omitted, the root of the Amazon EFS volume will be used.\n\t\t\tSpecifying /
will have the same effect as omitting this parameter.
If an EFS access point is specified in the authorizationConfig
, the\n\t\t\t\troot directory parameter must either be omitted or set to /
which will\n\t\t\t\tenforce the path set on the EFS access point.
A list of files containing the environment variables to pass to a container. You can\n\t\t\tspecify up to ten environment files. The file must have a .env
file\n\t\t\textension. Each line in an environment file should contain an environment variable in\n\t\t\t\tVARIABLE=VALUE
format. Lines beginning with #
are treated\n\t\t\tas comments and are ignored. For more information about the environment variable file\n\t\t\tsyntax, see Declare default\n\t\t\t\tenvironment variables in file.
If there are environment variables specified using the environment
\n\t\t\tparameter in a container definition, they take precedence over the variables contained\n\t\t\twithin an environment file. If multiple environment files are specified that contain the\n\t\t\tsame variable, they're processed from the top down. We recommend that you use unique\n\t\t\tvariable names. For more information, see Specifying environment\n\t\t\t\tvariables in the Amazon Elastic Container Service Developer Guide.
This parameter is only supported for tasks hosted on Fargate using the\n\t\t\tfollowing platform versions:
\nLinux platform version 1.4.0
or later.
Windows platform version 1.0.0
or later.
A list of files containing the environment variables to pass to a container. You can\n\t\t\tspecify up to ten environment files. The file must have a .env
file\n\t\t\textension. Each line in an environment file should contain an environment variable in\n\t\t\t\tVARIABLE=VALUE
format. Lines beginning with #
are treated\n\t\t\tas comments and are ignored. For more information about the environment variable file\n\t\t\tsyntax, see Declare default\n\t\t\t\tenvironment variables in file.
If there are environment variables specified using the environment
\n\t\t\tparameter in a container definition, they take precedence over the variables contained\n\t\t\twithin an environment file. If multiple environment files are specified that contain the\n\t\t\tsame variable, they're processed from the top down. We recommend that you use unique\n\t\t\tvariable names. For more information, see Specifying environment\n\t\t\t\tvariables in the Amazon Elastic Container Service Developer Guide.
This parameter is only supported for tasks hosted on Fargate using the\n\t\t\tfollowing platform versions:
\n\t\tLinux platform version 1.4.0
or later.
Windows platform version 1.0.0
or later.
The amount of ephemeral storage to allocate for the task. This parameter is used to\n\t\t\texpand the total amount of ephemeral storage available, beyond the default amount, for\n\t\t\ttasks hosted on Fargate. For more information, see Fargate task\n\t\t\t\tstorage in the Amazon ECS User Guide for Fargate.
\nThis parameter is only supported for tasks hosted on Fargate using\n\t\t\t\tLinux platform version 1.4.0
or later. This parameter is not supported\n\t\t\t\tfor Windows containers on Fargate.
The amount of ephemeral storage to allocate for the task. This parameter is used to\n\t\t\texpand the total amount of ephemeral storage available, beyond the default amount, for\n\t\t\ttasks hosted on Fargate. For more information, see Fargate task\n\t\t\t\tstorage in the Amazon ECS User Guide for Fargate.
\n\t\tThis parameter is only supported for tasks hosted on Fargate using\n\t\t\t\tLinux platform version 1.4.0
or later. This parameter is not supported\n\t\t\t\tfor Windows containers on Fargate.
Runs a command remotely on a container within a task.
\nIf you use a condition key in your IAM policy to refine the conditions for the policy\n\t\t\tstatement, for example limit the actions to a specific cluster, you receive an\n\t\t\t\tAccessDeniedException
when there is a mismatch between the condition\n\t\t\tkey value and the corresponding parameter value.
For information about required permissions and considerations, see Using Amazon ECS Exec for\n\t\t\tdebugging in the Amazon ECS Developer Guide.
" + "smithy.api#documentation": "Runs a command remotely on a container within a task.
\n\t\tIf you use a condition key in your IAM policy to refine the conditions for the policy\n\t\t\tstatement, for example limit the actions to a specific cluster, you receive an\n\t\t\t\tAccessDeniedException
when there is a mismatch between the condition\n\t\t\tkey value and the corresponding parameter value.
The log setting to use for redirecting logs for your execute command results. The\n\t\t\tfollowing log settings are available.
\n\n NONE
: The execute command session is not logged.
\n DEFAULT
: The awslogs
configuration in the task\n\t\t\t\t\tdefinition is used. If no logging parameter is specified, it defaults to this\n\t\t\t\t\tvalue. If no awslogs
log driver is configured in the task\n\t\t\t\t\tdefinition, the output won't be logged.
\n OVERRIDE
: Specify the logging details as a part of\n\t\t\t\t\t\tlogConfiguration
. If the OVERRIDE
logging option\n\t\t\t\t\tis specified, the logConfiguration
is required.
The log setting to use for redirecting logs for your execute command results. The\n\t\t\tfollowing log settings are available.
\n\t\t\n NONE
: The execute command session is not logged.
\n DEFAULT
: The awslogs
configuration in the task\n\t\t\t\t\tdefinition is used. If no logging parameter is specified, it defaults to this\n\t\t\t\t\tvalue. If no awslogs
log driver is configured in the task\n\t\t\t\t\tdefinition, the output won't be logged.
\n OVERRIDE
: Specify the logging details as a part of\n\t\t\t\t\t\tlogConfiguration
. If the OVERRIDE
logging option\n\t\t\t\t\tis specified, the logConfiguration
is required.
The name of the CloudWatch log group to send logs to.
\nThe CloudWatch log group must already be created.
\nThe name of the CloudWatch log group to send logs to.
\n\t\tThe CloudWatch log group must already be created.
\n\t\tThe name of the S3 bucket to send logs to.
\nThe S3 bucket must already be created.
\nThe name of the S3 bucket to send logs to.
\n\t\tThe S3 bucket must already be created.
\n\t\tThe authorization configuration details for Amazon FSx for Windows File Server file system. See FSxWindowsFileServerVolumeConfiguration in the Amazon ECS API\n\t\t\t\tReference.
\nFor more information and the input format, see Amazon FSx for Windows File Server Volumes\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "The authorization configuration details for Amazon FSx for Windows File Server file system. See FSxWindowsFileServerVolumeConfiguration in the Amazon ECS API\n\t\t\t\tReference.
\n\t\tFor more information and the input format, see Amazon FSx for Windows File Server Volumes\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#FSxWindowsFileServerVolumeConfiguration": { @@ -6356,7 +6328,7 @@ } }, "traits": { - "smithy.api#documentation": "This parameter is specified when you're using Amazon FSx for Windows File Server file system for task\n\t\t\tstorage.
\nFor more information and the input format, see Amazon FSx for Windows File Server volumes\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "This parameter is specified when you're using Amazon FSx for Windows File Server file system for task\n\t\t\tstorage.
\n\t\tFor more information and the input format, see Amazon FSx for Windows File Server volumes\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#Failure": { @@ -6404,7 +6376,7 @@ "options": { "target": "com.amazonaws.ecs#FirelensConfigurationOptionsMap", "traits": { - "smithy.api#documentation": "The options to use when configuring the log router. This field is optional and can be\n\t\t\tused to specify a custom configuration file or to add additional metadata, such as the\n\t\t\ttask, task definition, cluster, and container instance details to the log event. If\n\t\t\tspecified, the syntax to use is\n\t\t\t\t\"options\":{\"enable-ecs-log-metadata\":\"true|false\",\"config-file-type:\"s3|file\",\"config-file-value\":\"arn:aws:s3:::mybucket/fluent.conf|filepath\"}
.\n\t\t\tFor more information, see Creating\n\t\t\t\ta task definition that uses a FireLens configuration in the\n\t\t\tAmazon Elastic Container Service Developer Guide.
Tasks hosted on Fargate only support the file
configuration file\n\t\t\t\ttype.
The options to use when configuring the log router. This field is optional and can be\n\t\t\tused to specify a custom configuration file or to add additional metadata, such as the\n\t\t\ttask, task definition, cluster, and container instance details to the log event. If\n\t\t\tspecified, the syntax to use is\n\t\t\t\t\"options\":{\"enable-ecs-log-metadata\":\"true|false\",\"config-file-type:\"s3|file\",\"config-file-value\":\"arn:aws:s3:::mybucket/fluent.conf|filepath\"}
.\n\t\t\tFor more information, see Creating\n\t\t\t\ta task definition that uses a FireLens configuration in the\n\t\t\tAmazon Elastic Container Service Developer Guide.
Tasks hosted on Fargate only support the file
configuration file\n\t\t\t\ttype.
A list of tasks with the following information.
\n\n taskArn
: The task ARN.
\n protectionEnabled
: The protection status of the task. If scale-in\n\t\t\t\t\tprotection is enabled for a task, the value is true
. Otherwise, it\n\t\t\t\t\tis false
.
\n expirationDate
: The epoch time when protection for the task will\n\t\t\t\t\texpire.
A list of tasks with the following information.
\n\t\t\n taskArn
: The task ARN.
\n protectionEnabled
: The protection status of the task. If scale-in\n\t\t\t\t\tprotection is enabled for a task, the value is true
. Otherwise, it\n\t\t\t\t\tis false
.
\n expirationDate
: The epoch time when protection for the task will\n\t\t\t\t\texpire.
A string array representing the command that the container runs to determine if it is\n\t\t\thealthy. The string array must start with CMD
to run the command arguments\n\t\t\tdirectly, or CMD-SHELL
to run the command with the container's default\n\t\t\tshell.
When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list\n\t\t\tof commands in brackets.
\n\n [ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]
\n
You don't need to include the brackets when you use the Amazon Web Services Management Console.
\n\n \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\"
\n
An exit code of 0 indicates success, and non-zero exit code indicates failure. For\n\t\t\tmore information, see HealthCheck
in the Create a container\n\t\t\tsection of the Docker Remote API.
A string array representing the command that the container runs to determine if it is\n\t\t\thealthy. The string array must start with CMD
to run the command arguments\n\t\t\tdirectly, or CMD-SHELL
to run the command with the container's default\n\t\t\tshell.
When you use the Amazon Web Services Management Console JSON panel, the Command Line Interface, or the APIs, enclose the list\n\t\t\tof commands in brackets.
\n\t\t\n\t\t\t [ \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\" ]
\n\t\t
You don't need to include the brackets when you use the Amazon Web Services Management Console.
\n\t\t\n\t\t\t \"CMD-SHELL\", \"curl -f http://localhost/ || exit 1\"
\n\t\t
An exit code of 0 indicates success, and non-zero exit code indicates failure. For\n\t\t\tmore information, see HealthCheck
in the Create a container\n\t\t\tsection of the Docker Remote API.
The optional grace period to provide containers time to bootstrap before failed health\n\t\t\tchecks count towards the maximum number of retries. You can specify between 0 and 300\n\t\t\tseconds. By default, the startPeriod
is disabled.
If a health check succeeds within the startPeriod
, then the container\n\t\t\t\tis considered healthy and any subsequent failures count toward the maximum number of\n\t\t\t\tretries.
The optional grace period to provide containers time to bootstrap before failed health\n\t\t\tchecks count towards the maximum number of retries. You can specify between 0 and 300\n\t\t\tseconds. By default, the startPeriod
is disabled.
If a health check succeeds within the startPeriod
, then the container\n\t\t\t\tis considered healthy and any subsequent failures count toward the maximum number of\n\t\t\t\tretries.
An object representing a container health check. Health check parameters that are\n\t\t\tspecified in a container definition override any Docker health checks that exist in the\n\t\t\tcontainer image (such as those specified in a parent image or from the image's\n\t\t\tDockerfile).
\nThe Amazon ECS container agent only monitors and reports on the health checks specified\n\t\t\t\tin the task definition. Amazon ECS does not monitor Docker health checks that are\n\t\t\t\tembedded in a container image and not specified in the container definition. Health\n\t\t\t\tcheck parameters that are specified in a container definition override any Docker\n\t\t\t\thealth checks that exist in the container image.
\nYou can view the health status of both individual containers and a task with the\n\t\t\tDescribeTasks API operation or when viewing the task details in the console.
\nThe following describes the possible healthStatus
values for a\n\t\t\tcontainer:
\n HEALTHY
-The container health check has passed\n\t\t\t\t\tsuccessfully.
\n UNHEALTHY
-The container health check has failed.
\n UNKNOWN
-The container health check is being evaluated or\n\t\t\t\t\tthere's no container health check defined.
The following describes the possible healthStatus
values for a task. The\n\t\t\tcontainer health check status of nonessential containers only affects the health status\n\t\t\tof a task if no essential containers have health checks defined.
\n HEALTHY
-All essential containers within the task have\n\t\t\t\t\tpassed their health checks.
\n UNHEALTHY
-One or more essential containers have failed\n\t\t\t\t\ttheir health check.
\n UNKNOWN
-The essential containers within the task are still\n\t\t\t\t\thaving their health checks evaluated or there are only nonessential containers\n\t\t\t\t\twith health checks defined.
If a task is run manually, and not as part of a service, the task will continue its\n\t\t\tlifecycle regardless of its health status. For tasks that are part of a service, if the\n\t\t\ttask reports as unhealthy then the task will be stopped and the service scheduler will\n\t\t\treplace it.
\nFor tasks that are a part of a service and the service uses the ECS
\n\t\t\t\trolling deployment type, the deployment is paused while the new tasks have the\n\t\t\t\t\tUNKNOWN
task health check status. For example, tasks that define\n\t\t\t\thealth checks for nonessential containers when no essential containers have health\n\t\t\t\tchecks will have the UNKNOWN
health check status indefinitely which\n\t\t\t\tprevents the deployment from completing.
The following are notes about container health check support:
\nContainer health checks require version 1.17.0 or greater of the Amazon ECS\n\t\t\t\t\tcontainer agent. For more information, see Updating the\n\t\t\t\t\t\tAmazon ECS container agent.
\nContainer health checks are supported for Fargate tasks if\n\t\t\t\t\tyou're using platform version 1.1.0
or greater. For more\n\t\t\t\t\tinformation, see Fargate\n\t\t\t\t\t\tplatform versions.
Container health checks aren't supported for tasks that are part of a service\n\t\t\t\t\tthat's configured to use a Classic Load Balancer.
\nAn object representing a container health check. Health check parameters that are\n\t\t\tspecified in a container definition override any Docker health checks that exist in the\n\t\t\tcontainer image (such as those specified in a parent image or from the image's\n\t\t\tDockerfile).
\n\t\tThe Amazon ECS container agent only monitors and reports on the health checks specified\n\t\t\t\tin the task definition. Amazon ECS does not monitor Docker health checks that are\n\t\t\t\tembedded in a container image and not specified in the container definition. Health\n\t\t\t\tcheck parameters that are specified in a container definition override any Docker\n\t\t\t\thealth checks that exist in the container image.
\n\t\tYou can view the health status of both individual containers and a task with the\n\t\t\tDescribeTasks API operation or when viewing the task details in the console.
\n\t\tThe following describes the possible healthStatus
values for a\n\t\t\tcontainer:
\n HEALTHY
-The container health check has passed\n\t\t\t\t\tsuccessfully.
\n UNHEALTHY
-The container health check has failed.
\n UNKNOWN
-The container health check is being evaluated or\n\t\t\t\t\tthere's no container health check defined.
The following describes the possible healthStatus
values for a task. The\n\t\t\tcontainer health check status of nonessential containers only affects the health status\n\t\t\tof a task if no essential containers have health checks defined.
\n HEALTHY
-All essential containers within the task have\n\t\t\t\t\tpassed their health checks.
\n UNHEALTHY
-One or more essential containers have failed\n\t\t\t\t\ttheir health check.
\n UNKNOWN
-The essential containers within the task are still\n\t\t\t\t\thaving their health checks evaluated or there are only nonessential containers\n\t\t\t\t\twith health checks defined.
If a task is run manually, and not as part of a service, the task will continue its\n\t\t\tlifecycle regardless of its health status. For tasks that are part of a service, if the\n\t\t\ttask reports as unhealthy then the task will be stopped and the service scheduler will\n\t\t\treplace it.
\n\t\tFor tasks that are a part of a service and the service uses the ECS
\n\t\t\t\trolling deployment type, the deployment is paused while the new tasks have the\n\t\t\t\t\tUNKNOWN
task health check status. For example, tasks that define\n\t\t\t\thealth checks for nonessential containers when no essential containers have health\n\t\t\t\tchecks will have the UNKNOWN
health check status indefinitely which\n\t\t\t\tprevents the deployment from completing.
The following are notes about container health check support:
\n\t\tContainer health checks require version 1.17.0 or greater of the Amazon ECS\n\t\t\t\t\tcontainer agent. For more information, see Updating the\n\t\t\t\t\t\tAmazon ECS container agent.
\n\t\t\tContainer health checks are supported for Fargate tasks if\n\t\t\t\t\tyou're using platform version 1.1.0
or greater. For more\n\t\t\t\t\tinformation, see Fargate\n\t\t\t\t\t\tplatform versions.
Container health checks aren't supported for tasks that are part of a service\n\t\t\t\t\tthat's configured to use a Classic Load Balancer.
\n\t\t\tWhen the host
parameter is used, specify a sourcePath
to\n\t\t\tdeclare the path on the host container instance that's presented to the container. If\n\t\t\tthis parameter is empty, then the Docker daemon has assigned a host path for you. If the\n\t\t\t\thost
parameter contains a sourcePath
file location, then\n\t\t\tthe data volume persists at the specified location on the host container instance until\n\t\t\tyou delete it manually. If the sourcePath
value doesn't exist on the host\n\t\t\tcontainer instance, the Docker daemon creates it. If the location does exist, the\n\t\t\tcontents of the source path folder are exported.
If you're using the Fargate launch type, the sourcePath
\n\t\t\tparameter is not supported.
When the host
parameter is used, specify a sourcePath
to\n\t\t\tdeclare the path on the host container instance that's presented to the container. If\n\t\t\tthis parameter is empty, then the Docker daemon has assigned a host path for you. If the\n\t\t\t\thost
parameter contains a sourcePath
file location, then\n\t\t\tthe data volume persists at the specified location on the host container instance until\n\t\t\tyou delete it manually. If the sourcePath
value doesn't exist on the host\n\t\t\tcontainer instance, the Docker daemon creates it. If the location does exist, the\n\t\t\tcontents of the source path folder are exported.
If you're using the Fargate launch type, the sourcePath
\n\t\t\tparameter is not supported.
The Linux capabilities for the container that have been added to the default\n\t\t\tconfiguration provided by Docker. This parameter maps to CapAdd
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--cap-add
option to docker\n\t\t\t\trun.
Tasks launched on Fargate only support adding the SYS_PTRACE
kernel\n\t\t\t\tcapability.
Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" |\n\t\t\t\t\"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" |\n\t\t\t\t\"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" |\n\t\t\t\t\"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\"\n\t\t\t\t| \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" |\n\t\t\t\t\"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" |\n\t\t\t\t\"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" |\n\t\t\t\"WAKE_ALARM\"
\n
The Linux capabilities for the container that have been added to the default\n\t\t\tconfiguration provided by Docker. This parameter maps to CapAdd
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--cap-add
option to docker\n\t\t\t\trun.
Tasks launched on Fargate only support adding the SYS_PTRACE
kernel\n\t\t\t\tcapability.
Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" |\n\t\t\t\t\"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" |\n\t\t\t\t\"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" |\n\t\t\t\t\"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\"\n\t\t\t\t| \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" |\n\t\t\t\t\"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" |\n\t\t\t\t\"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" |\n\t\t\t\"WAKE_ALARM\"
\n
The Linux capabilities for the container that have been removed from the default\n\t\t\tconfiguration provided by Docker. This parameter maps to CapDrop
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--cap-drop
option to docker\n\t\t\t\trun.
Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" |\n\t\t\t\t\"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" |\n\t\t\t\t\"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" |\n\t\t\t\t\"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\"\n\t\t\t\t| \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" |\n\t\t\t\t\"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" |\n\t\t\t\t\"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" |\n\t\t\t\"WAKE_ALARM\"
\n
The Linux capabilities for the container that have been removed from the default\n\t\t\tconfiguration provided by Docker. This parameter maps to CapDrop
in the\n\t\t\tCreate a container section of the Docker Remote API and the\n\t\t\t\t--cap-drop
option to docker\n\t\t\t\trun.
Valid values: \"ALL\" | \"AUDIT_CONTROL\" | \"AUDIT_WRITE\" | \"BLOCK_SUSPEND\" |\n\t\t\t\t\"CHOWN\" | \"DAC_OVERRIDE\" | \"DAC_READ_SEARCH\" | \"FOWNER\" | \"FSETID\" | \"IPC_LOCK\" |\n\t\t\t\t\"IPC_OWNER\" | \"KILL\" | \"LEASE\" | \"LINUX_IMMUTABLE\" | \"MAC_ADMIN\" | \"MAC_OVERRIDE\" |\n\t\t\t\t\"MKNOD\" | \"NET_ADMIN\" | \"NET_BIND_SERVICE\" | \"NET_BROADCAST\" | \"NET_RAW\" | \"SETFCAP\"\n\t\t\t\t| \"SETGID\" | \"SETPCAP\" | \"SETUID\" | \"SYS_ADMIN\" | \"SYS_BOOT\" | \"SYS_CHROOT\" |\n\t\t\t\t\"SYS_MODULE\" | \"SYS_NICE\" | \"SYS_PACCT\" | \"SYS_PTRACE\" | \"SYS_RAWIO\" |\n\t\t\t\t\"SYS_RESOURCE\" | \"SYS_TIME\" | \"SYS_TTY_CONFIG\" | \"SYSLOG\" |\n\t\t\t\"WAKE_ALARM\"
\n
The Linux capabilities for the container that are added to or dropped from the default\n\t\t\tconfiguration provided by Docker.
\nFor tasks that use the Fargate launch type,\n\t\t\t\t\tcapabilities
is supported for all platform versions but the\n\t\t\t\t\tadd
parameter is only supported if using platform version 1.4.0 or\n\t\t\t\tlater.
The Linux capabilities for the container that are added to or dropped from the default\n\t\t\tconfiguration provided by Docker.
\n\t\tFor tasks that use the Fargate launch type,\n\t\t\t\t\tcapabilities
is supported for all platform versions but the\n\t\t\t\t\tadd
parameter is only supported if using platform version 1.4.0 or\n\t\t\t\tlater.
Any host devices to expose to the container. This parameter maps to\n\t\t\t\tDevices
in the Create a container section of the\n\t\t\tDocker Remote API and the --device
option to docker run.
If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\tdevices
parameter isn't supported.
Any host devices to expose to the container. This parameter maps to\n\t\t\t\tDevices
in the Create a container section of the\n\t\t\tDocker Remote API and the --device
option to docker run.
If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\tdevices
parameter isn't supported.
The value for the size (in MiB) of the /dev/shm
volume. This parameter\n\t\t\tmaps to the --shm-size
option to docker\n\t\t\t\trun.
If you are using tasks that use the Fargate launch type, the\n\t\t\t\t\tsharedMemorySize
parameter is not supported.
The value for the size (in MiB) of the /dev/shm
volume. This parameter\n\t\t\tmaps to the --shm-size
option to docker\n\t\t\t\trun.
If you are using tasks that use the Fargate launch type, the\n\t\t\t\t\tsharedMemorySize
parameter is not supported.
The container path, mount options, and size (in MiB) of the tmpfs mount. This\n\t\t\tparameter maps to the --tmpfs
option to docker run.
If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\ttmpfs
parameter isn't supported.
The container path, mount options, and size (in MiB) of the tmpfs mount. This\n\t\t\tparameter maps to the --tmpfs
option to docker run.
If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\ttmpfs
parameter isn't supported.
The total amount of swap memory (in MiB) a container can use. This parameter will be\n\t\t\ttranslated to the --memory-swap
option to docker run where the value would be the sum of\n\t\t\tthe container memory plus the maxSwap
value.
If a maxSwap
value of 0
is specified, the container will not\n\t\t\tuse swap. Accepted values are 0
or any positive integer. If the\n\t\t\t\tmaxSwap
parameter is omitted, the container will use the swap\n\t\t\tconfiguration for the container instance it is running on. A maxSwap
value\n\t\t\tmust be set for the swappiness
parameter to be used.
If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\tmaxSwap
parameter isn't supported.
The total amount of swap memory (in MiB) a container can use. This parameter will be\n\t\t\ttranslated to the --memory-swap
option to docker run where the value would be the sum of\n\t\t\tthe container memory plus the maxSwap
value.
If a maxSwap
value of 0
is specified, the container will not\n\t\t\tuse swap. Accepted values are 0
or any positive integer. If the\n\t\t\t\tmaxSwap
parameter is omitted, the container will use the swap\n\t\t\tconfiguration for the container instance it is running on. A maxSwap
value\n\t\t\tmust be set for the swappiness
parameter to be used.
If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\tmaxSwap
parameter isn't supported.
This allows you to tune a container's memory swappiness behavior. A\n\t\t\t\tswappiness
value of 0
will cause swapping to not happen\n\t\t\tunless absolutely necessary. A swappiness
value of 100
will\n\t\t\tcause pages to be swapped very aggressively. Accepted values are whole numbers between\n\t\t\t\t0
and 100
. If the swappiness
parameter is not\n\t\t\tspecified, a default value of 60
is used. If a value is not specified for\n\t\t\t\tmaxSwap
then this parameter is ignored. This parameter maps to the\n\t\t\t\t--memory-swappiness
option to docker run.
If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\tswappiness
parameter isn't supported.
This allows you to tune a container's memory swappiness behavior. A\n\t\t\t\tswappiness
value of 0
will cause swapping to not happen\n\t\t\tunless absolutely necessary. A swappiness
value of 100
will\n\t\t\tcause pages to be swapped very aggressively. Accepted values are whole numbers between\n\t\t\t\t0
and 100
. If the swappiness
parameter is not\n\t\t\tspecified, a default value of 60
is used. If a value is not specified for\n\t\t\t\tmaxSwap
then this parameter is ignored. This parameter maps to the\n\t\t\t\t--memory-swappiness
option to docker run.
If you're using tasks that use the Fargate launch type, the\n\t\t\t\t\tswappiness
parameter isn't supported.
The ARN of the principal, which can be an IAM user, IAM role, or the root user. If\n\t\t\tthis field is omitted, the account settings are listed only for the authenticated\n\t\t\tuser.
\nFederated users assume the account setting of the root user and can't have\n\t\t\t\texplicit account settings set for them.
\nThe ARN of the principal, which can be an IAM user, IAM role, or the root user. If\n\t\t\tthis field is omitted, the account settings are listed only for the authenticated\n\t\t\tuser.
\n\t\tFederated users assume the account setting of the root user and can't have\n\t\t\t\texplicit account settings set for them.
\n\t\tThe nextToken
value returned from a ListAccountSettings
\n\t\t\trequest indicating that more results are available to fulfill the request and further\n\t\t\tcalls will be needed. If maxResults
was provided, it's possible the number\n\t\t\tof results to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe nextToken
value returned from a ListAccountSettings
\n\t\t\trequest indicating that more results are available to fulfill the request and further\n\t\t\tcalls will be needed. If maxResults
was provided, it's possible the number\n\t\t\tof results to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe nextToken
value returned from a ListAttributes
request\n\t\t\tindicating that more results are available to fulfill the request and further calls are\n\t\t\tneeded. If maxResults
was provided, it's possible the number of results to\n\t\t\tbe fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe nextToken
value returned from a ListAttributes
request\n\t\t\tindicating that more results are available to fulfill the request and further calls are\n\t\t\tneeded. If maxResults
was provided, it's possible the number of results to\n\t\t\tbe fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe nextToken
value returned from a ListClusters
request\n\t\t\tindicating that more results are available to fulfill the request and further calls are\n\t\t\tneeded. If maxResults
was provided, it's possible the number of results to\n\t\t\tbe fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe nextToken
value returned from a ListClusters
request\n\t\t\tindicating that more results are available to fulfill the request and further calls are\n\t\t\tneeded. If maxResults
was provided, it's possible the number of results to\n\t\t\tbe fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe nextToken
value returned from a ListContainerInstances
\n\t\t\trequest indicating that more results are available to fulfill the request and further\n\t\t\tcalls are needed. If maxResults
was provided, it's possible the number of\n\t\t\tresults to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe nextToken
value returned from a ListContainerInstances
\n\t\t\trequest indicating that more results are available to fulfill the request and further\n\t\t\tcalls are needed. If maxResults
was provided, it's possible the number of\n\t\t\tresults to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace to list the services in.
\nTasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
", + "smithy.api#documentation": "The namespace name or full Amazon Resource Name (ARN) of the Cloud Map namespace to list the services in.
\n\t\tTasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
", "smithy.api#required": {} } }, @@ -7354,7 +7326,7 @@ "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The nextToken
value returned from a ListServices
request\n\t\t\tindicating that more results are available to fulfill the request and further calls will\n\t\t\tbe needed. If maxResults
was provided, it is possible the number of results\n\t\t\tto be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe nextToken
value returned from a ListServices
request\n\t\t\tindicating that more results are available to fulfill the request and further calls will\n\t\t\tbe needed. If maxResults
was provided, it is possible the number of results\n\t\t\tto be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.
\nReturns a list of task definition families that are registered to your account. This\n\t\t\tlist includes task definition families that no longer have any ACTIVE
task\n\t\t\tdefinition revisions.
You can filter out task definition families that don't contain any ACTIVE
\n\t\t\ttask definition revisions by setting the status
parameter to\n\t\t\t\tACTIVE
. You can also filter the results with the\n\t\t\t\tfamilyPrefix
parameter.
Returns a list of task definition families that are registered to your account. This\n\t\t\tlist includes task definition families that no longer have any ACTIVE
task\n\t\t\tdefinition revisions.
You can filter out task definition families that don't contain any ACTIVE
\n\t\t\ttask definition revisions by setting the status
parameter to\n\t\t\t\tACTIVE
. You can also filter the results with the\n\t\t\t\tfamilyPrefix
parameter.
The nextToken
value returned from a\n\t\t\t\tListTaskDefinitionFamilies
request indicating that more results are\n\t\t\tavailable to fulfill the request and further calls will be needed. If\n\t\t\t\tmaxResults
was provided, it is possible the number of results to be\n\t\t\tfewer than maxResults
.
This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe nextToken
value returned from a\n\t\t\t\tListTaskDefinitionFamilies
request indicating that more results are\n\t\t\tavailable to fulfill the request and further calls will be needed. If\n\t\t\t\tmaxResults
was provided, it is possible the number of results to be\n\t\t\tfewer than maxResults
.
This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe nextToken
value returned from a ListTaskDefinitions
\n\t\t\trequest indicating that more results are available to fulfill the request and further\n\t\t\tcalls will be needed. If maxResults
was provided, it is possible the number\n\t\t\tof results to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe nextToken
value returned from a ListTaskDefinitions
\n\t\t\trequest indicating that more results are available to fulfill the request and further\n\t\t\tcalls will be needed. If maxResults
was provided, it is possible the number\n\t\t\tof results to be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.
\nReturns a list of tasks. You can filter the results by cluster, task definition\n\t\t\tfamily, container instance, launch type, what IAM principal started the task, or by the\n\t\t\tdesired status of the task.
\nRecently stopped tasks might appear in the returned results. Currently, stopped tasks\n\t\t\tappear in the returned results for at least one hour.
", + "smithy.api#documentation": "Returns a list of tasks. You can filter the results by cluster, task definition\n\t\t\tfamily, container instance, launch type, what IAM principal started the task, or by the\n\t\t\tdesired status of the task.
\n\t\tRecently stopped tasks might appear in the returned results. Currently, stopped tasks\n\t\t\tappear in the returned results for at least one hour.
", "smithy.api#paginated": { "inputToken": "nextToken", "outputToken": "nextToken", @@ -7658,7 +7630,7 @@ "nextToken": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The nextToken
value returned from a ListTasks
request\n\t\t\tindicating that more results are available to fulfill the request and further calls will\n\t\t\tbe needed. If maxResults
was provided, it's possible the number of results\n\t\t\tto be fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe nextToken
value returned from a ListTasks
request\n\t\t\tindicating that more results are available to fulfill the request and further calls will\n\t\t\tbe needed. If maxResults
 was provided, it's possible that the number of results\n\t\t\tis fewer than maxResults
.
This token should be treated as an opaque identifier that is only used to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe startedBy
value to filter the task results with. Specifying a\n\t\t\t\tstartedBy
value limits the results to tasks that were started with that\n\t\t\tvalue.
When you specify startedBy
as the filter, it must be the only filter that\n\t\t\tyou use.
The startedBy
value to filter the task results with. Specifying a\n\t\t\t\tstartedBy
value limits the results to tasks that were started with that\n\t\t\tvalue.
When you specify startedBy
as the filter, it must be the only filter that\n\t\t\tyou use.
The task desired status to use when filtering the ListTasks
results.\n\t\t\tSpecifying a desiredStatus
of STOPPED
limits the results to\n\t\t\ttasks that Amazon ECS has set the desired status to STOPPED
. This can be useful\n\t\t\tfor debugging tasks that aren't starting properly or have died or finished. The default\n\t\t\tstatus filter is RUNNING
, which shows tasks that Amazon ECS has set the desired\n\t\t\tstatus to RUNNING
.
Although you can filter results based on a desired status of PENDING
,\n\t\t\t\tthis doesn't return any results. Amazon ECS never sets the desired status of a task to\n\t\t\t\tthat value (only a task's lastStatus
may have a value of\n\t\t\t\t\tPENDING
).
The task desired status to use when filtering the ListTasks
results.\n\t\t\tSpecifying a desiredStatus
of STOPPED
 limits the results to\n\t\t\ttasks for which Amazon ECS has set the desired status to STOPPED
. This can be useful\n\t\t\tfor debugging tasks that aren't starting properly or have died or finished. The default\n\t\t\tstatus filter is RUNNING
, which shows tasks that Amazon ECS has set the desired\n\t\t\tstatus to RUNNING
.
Although you can filter results based on a desired status of PENDING
,\n\t\t\t\tthis doesn't return any results. Amazon ECS never sets the desired status of a task to\n\t\t\t\tthat value (only a task's lastStatus
may have a value of\n\t\t\t\t\tPENDING
).
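To make the filtering described above concrete, here is a hedged aws-sdk-kotlin sketch that lists recently stopped tasks; the cluster name and region are assumptions, and DesiredStatus.Stopped is assumed to be the generated enum member for STOPPED.

```kotlin
import aws.sdk.kotlin.services.ecs.EcsClient
import aws.sdk.kotlin.services.ecs.model.DesiredStatus

// Sketch: list tasks whose desired status Amazon ECS has set to STOPPED, which is
// useful when debugging tasks that exited or failed to start.
suspend fun listStoppedTaskArns() {
    EcsClient { region = "us-east-1" }.use { ecs ->
        val response = ecs.listTasks {
            cluster = "my-cluster"              // assumption: your cluster name
            desiredStatus = DesiredStatus.Stopped
        }
        response.taskArns?.forEach(::println)
    }
}
```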
The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or\n\t\t\ttask set.
\nA target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. If you're using a\n\t\t\tClassic Load Balancer, omit the target group ARN.
\nFor services using the ECS
deployment controller, you can specify one or\n\t\t\tmultiple target groups. For more information, see Registering multiple target groups with a service in\n\t\t\tthe Amazon Elastic Container Service Developer Guide.
For services using the CODE_DEPLOY
deployment controller, you're required\n\t\t\tto define two target groups for the load balancer. For more information, see Blue/green deployment with CodeDeploy in the\n\t\t\tAmazon Elastic Container Service Developer Guide.
If your service's task definition uses the awsvpc
network mode, you\n\t\t\t\tmust choose ip
as the target type, not instance
. Do this\n\t\t\t\twhen creating your target groups because tasks that use the awsvpc
\n\t\t\t\tnetwork mode are associated with an elastic network interface, not an Amazon EC2\n\t\t\t\tinstance. This network mode is required for the Fargate launch\n\t\t\t\ttype.
The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or\n\t\t\ttask set.
\n\t\tA target group ARN is only specified when using an Application Load Balancer or Network Load Balancer. If you're using a\n\t\t\tClassic Load Balancer, omit the target group ARN.
\n\t\tFor services using the ECS
deployment controller, you can specify one or\n\t\t\tmultiple target groups. For more information, see Registering multiple target groups with a service in\n\t\t\tthe Amazon Elastic Container Service Developer Guide.
For services using the CODE_DEPLOY
deployment controller, you're required\n\t\t\tto define two target groups for the load balancer. For more information, see Blue/green deployment with CodeDeploy in the\n\t\t\tAmazon Elastic Container Service Developer Guide.
If your service's task definition uses the awsvpc
network mode, you\n\t\t\t\tmust choose ip
as the target type, not instance
. Do this\n\t\t\t\twhen creating your target groups because tasks that use the awsvpc
\n\t\t\t\tnetwork mode are associated with an elastic network interface, not an Amazon EC2\n\t\t\t\tinstance. This network mode is required for the Fargate launch\n\t\t\t\ttype.
The name of the load balancer to associate with the Amazon ECS service or task set.
\nA load balancer name is only specified when using a Classic Load Balancer. If you are using an Application Load Balancer\n\t\t\tor a Network Load Balancer the load balancer name parameter should be omitted.
" + "smithy.api#documentation": "The name of the load balancer to associate with the Amazon ECS service or task set.
\n\t\tA load balancer name is only specified when using a Classic Load Balancer. If you are using an Application Load Balancer\n\t\t\tor a Network Load Balancer the load balancer name parameter should be omitted.
" } }, "containerName": { @@ -7739,7 +7711,7 @@ } }, "traits": { - "smithy.api#documentation": "The load balancer configuration to use with a service or task set.
\nFor specific notes and restrictions regarding the use of load balancers with services\n\t\t\tand task sets, see the CreateService and CreateTaskSet actions.
\nWhen you add, update, or remove a load balancer configuration, Amazon ECS starts a new\n\t\t\tdeployment with the updated Elastic Load Balancing configuration. This causes tasks to register to and\n\t\t\tderegister from load balancers.
\nWe recommend that you verify this on a test environment before you update the Elastic Load Balancing\n\t\t\tconfiguration.
\nA service-linked role is required for services that use multiple target groups. For\n\t\t\tmore information, see Using\n\t\t\t\tservice-linked roles in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "The load balancer configuration to use with a service or task set.
\n\t\tFor specific notes and restrictions regarding the use of load balancers with services\n\t\t\tand task sets, see the CreateService and CreateTaskSet actions.
\n\t\tWhen you add, update, or remove a load balancer configuration, Amazon ECS starts a new\n\t\t\tdeployment with the updated Elastic Load Balancing configuration. This causes tasks to register to and\n\t\t\tderegister from load balancers.
\n\t\tWe recommend that you verify this on a test environment before you update the Elastic Load Balancing\n\t\t\tconfiguration.
\n\t\tA service-linked role is required for services that use multiple target groups. For\n\t\t\tmore information, see Using\n\t\t\t\tservice-linked roles in the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#LoadBalancers": { @@ -7754,14 +7726,14 @@ "logDriver": { "target": "com.amazonaws.ecs#LogDriver", "traits": { - "smithy.api#documentation": "The log driver to use for the container.
\nFor tasks on Fargate, the supported log drivers are awslogs
,\n\t\t\t\tsplunk
, and awsfirelens
.
For tasks hosted on Amazon EC2 instances, the supported log drivers are\n\t\t\t\tawslogs
, fluentd
, gelf
,\n\t\t\t\tjson-file
, journald
,\n\t\t\t\tlogentries
,syslog
, splunk
, and\n\t\t\t\tawsfirelens
.
For more information about using the awslogs
log driver, see Using\n\t\t\t\tthe awslogs log driver in the Amazon Elastic Container Service Developer Guide.
For more information about using the awsfirelens
log driver, see Custom log routing in the Amazon Elastic Container Service Developer Guide.
If you have a custom driver that isn't listed, you can fork the Amazon ECS container\n\t\t\t\tagent project that's available\n\t\t\t\t\ton GitHub and customize it to work with that driver. We encourage you to\n\t\t\t\tsubmit pull requests for changes that you would like to have included. However, we\n\t\t\t\tdon't currently provide support for running modified copies of this software.
\nThe log driver to use for the container.
\n\t\tFor tasks on Fargate, the supported log drivers are awslogs
,\n\t\t\t\tsplunk
, and awsfirelens
.
For tasks hosted on Amazon EC2 instances, the supported log drivers are\n\t\t\t\tawslogs
, fluentd
, gelf
,\n\t\t\t\tjson-file
, journald
,\n\t\t\t\tlogentries
,syslog
, splunk
, and\n\t\t\t\tawsfirelens
.
For more information about using the awslogs
log driver, see Using\n\t\t\t\tthe awslogs log driver in the Amazon Elastic Container Service Developer Guide.
For more information about using the awsfirelens
log driver, see Custom log routing in the Amazon Elastic Container Service Developer Guide.
If you have a custom driver that isn't listed, you can fork the Amazon ECS container\n\t\t\t\tagent project that's available\n\t\t\t\t\ton GitHub and customize it to work with that driver. We encourage you to\n\t\t\t\tsubmit pull requests for changes that you would like to have included. However, we\n\t\t\t\tdon't currently provide support for running modified copies of this software.
\n\t\tThe configuration options to send to the log driver. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
\n
The configuration options to send to the log driver. This parameter requires version 1.19 of the Docker Remote API or greater on your container instance. To check the Docker Remote API version on your container instance, log in to your container instance and run the following command: sudo docker version --format '{{.Server.APIVersion}}'
\n
The log configuration for the container. This parameter maps to LogConfig
\n\t\t\tin the Create a container section of the Docker Remote API and the\n\t\t\t\t--log-driver
option to \n docker\n\t\t\t\t\trun
\n .
By default, containers use the same logging driver that the Docker daemon uses.\n\t\t\tHowever, the container might use a different logging driver than the Docker daemon by\n\t\t\tspecifying a log driver configuration in the container definition. For more information\n\t\t\tabout the options for different supported log drivers, see Configure logging\n\t\t\t\tdrivers in the Docker documentation.
\nUnderstand the following when specifying a log configuration for your\n\t\t\tcontainers.
\nAmazon ECS currently supports a subset of the logging drivers available to the\n\t\t\t\t\tDocker daemon (shown in the valid values below). Additional log drivers may be\n\t\t\t\t\tavailable in future releases of the Amazon ECS container agent.
\nThis parameter requires version 1.18 of the Docker Remote API or greater on\n\t\t\t\t\tyour container instance.
\nFor tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must\n\t\t\t\t\tregister the available logging drivers with the\n\t\t\t\t\t\tECS_AVAILABLE_LOGGING_DRIVERS
environment variable before\n\t\t\t\t\tcontainers placed on that instance can use these log configuration options. For\n\t\t\t\t\tmore information, see Amazon ECS container agent configuration in the\n\t\t\t\t\tAmazon Elastic Container Service Developer Guide.
For tasks that are on Fargate, because you don't have access to the\n\t\t\t\t\tunderlying infrastructure your tasks are hosted on, any additional software\n\t\t\t\t\tneeded must be installed outside of the task. For example, the Fluentd output\n\t\t\t\t\taggregators or a remote host running Logstash to send Gelf logs to.
\nThe log configuration for the container. This parameter maps to LogConfig
\n\t\t\tin the Create a container section of the Docker Remote API and the\n\t\t\t\t--log-driver
option to \n docker\n\t\t\t\t\trun
\n .
By default, containers use the same logging driver that the Docker daemon uses.\n\t\t\tHowever, the container might use a different logging driver than the Docker daemon by\n\t\t\tspecifying a log driver configuration in the container definition. For more information\n\t\t\tabout the options for different supported log drivers, see Configure logging\n\t\t\t\tdrivers in the Docker documentation.
\n\t\tUnderstand the following when specifying a log configuration for your\n\t\t\tcontainers.
\n\t\tAmazon ECS currently supports a subset of the logging drivers available to the\n\t\t\t\t\tDocker daemon (shown in the valid values below). Additional log drivers may be\n\t\t\t\t\tavailable in future releases of the Amazon ECS container agent.
\n\t\t\tThis parameter requires version 1.18 of the Docker Remote API or greater on\n\t\t\t\t\tyour container instance.
\n\t\t\tFor tasks that are hosted on Amazon EC2 instances, the Amazon ECS container agent must\n\t\t\t\t\tregister the available logging drivers with the\n\t\t\t\t\t\tECS_AVAILABLE_LOGGING_DRIVERS
environment variable before\n\t\t\t\t\tcontainers placed on that instance can use these log configuration options. For\n\t\t\t\t\tmore information, see Amazon ECS container agent configuration in the\n\t\t\t\t\tAmazon Elastic Container Service Developer Guide.
For tasks that are on Fargate, because you don't have access to the\n\t\t\t\t\tunderlying infrastructure your tasks are hosted on, any additional software\n\t\t\t\t\tneeded must be installed outside of the task. For example, the Fluentd output\n\t\t\t\t\taggregators or a remote host running Logstash to send Gelf logs to.
\n\t\t\tThe minimum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale\n\t\t\tin process is not affected by this parameter If this parameter is omitted, the default\n\t\t\tvalue of 1
is used.
When additional capacity is required, Amazon ECS will scale up the minimum scaling step\n\t\t\tsize even if the actual demand is less than the minimum scaling step size.
\nIf you use a capacity provider with an Auto Scaling group configured with more than\n\t\t\tone Amazon EC2 instance type or Availability Zone, Amazon ECS will scale up by the exact minimum\n\t\t\tscaling step size value and will ignore both the maximum scaling step size as well as\n\t\t\tthe capacity demand.
" + "smithy.api#documentation": "The minimum number of Amazon EC2 instances that Amazon ECS will scale out at one time. The scale\n\t\t\tin process is not affected by this parameter If this parameter is omitted, the default\n\t\t\tvalue of 1
is used.
When additional capacity is required, Amazon ECS will scale up the minimum scaling step\n\t\t\tsize even if the actual demand is less than the minimum scaling step size.
\n\t\tIf you use a capacity provider with an Auto Scaling group configured with more than\n\t\t\tone Amazon EC2 instance type or Availability Zone, Amazon ECS will scale up by the exact minimum\n\t\t\tscaling step size value and will ignore both the maximum scaling step size as well as\n\t\t\tthe capacity demand.
" } }, "maximumScalingStepSize": { @@ -7968,7 +7940,7 @@ } }, "traits": { - "smithy.api#documentation": "The managed scaling settings for the Auto Scaling group capacity provider.
\nWhen managed scaling is enabled, Amazon ECS manages the scale-in and scale-out actions of\n\t\t\tthe Auto Scaling group. Amazon ECS manages a target tracking scaling policy using an Amazon ECS\n\t\t\tmanaged CloudWatch metric with the specified targetCapacity
value as the target\n\t\t\tvalue for the metric. For more information, see Using managed scaling in the Amazon Elastic Container Service Developer Guide.
If managed scaling is disabled, the user must manage the scaling of the Auto Scaling\n\t\t\tgroup.
" + "smithy.api#documentation": "The managed scaling settings for the Auto Scaling group capacity provider.
\n\t\tWhen managed scaling is enabled, Amazon ECS manages the scale-in and scale-out actions of\n\t\t\tthe Auto Scaling group. Amazon ECS manages a target tracking scaling policy using an Amazon ECS\n\t\t\tmanaged CloudWatch metric with the specified targetCapacity
value as the target\n\t\t\tvalue for the metric. For more information, see Using managed scaling in the Amazon Elastic Container Service Developer Guide.
If managed scaling is disabled, the user must manage the scaling of the Auto Scaling\n\t\t\tgroup.
" } }, "com.amazonaws.ecs#ManagedScalingInstanceWarmupPeriod": { @@ -8114,18 +8086,6 @@ "traits": { "smithy.api#documentation": "The protocol used for the network binding.
" } - }, - "containerPortRange": { - "target": "com.amazonaws.ecs#String", - "traits": { - "smithy.api#documentation": "The port number range on the container that's bound to the dynamically mapped host port range.
\nThe following rules apply when you specify a containerPortRange
:
You must use either the bridge
network mode or the awsvpc
\n\t\t\t\t\tnetwork mode.
This parameter is available for both the EC2 and Fargate launch types.
\nThis parameter is available for both the Linux and Windows operating systems.
\nThe container instance must have at least version 1.67.0 of the container agent\n\t\t\t\t\tand at least version 1.67.0-1 of the ecs-init
package
You can specify a maximum of 100 port ranges per container.
\nYou do not specify a hostPortRange
. The value of the hostPortRange
is set\n\t\t\t\t\tas follows:
For containers in a task with the awsvpc
network mode,\n\t\t\t\t\t\t\tthe hostPort
is set to the same value as the\n\t\t\t\t\t\t\t\tcontainerPort
. This is a static mapping\n\t\t\t\t\t\t\tstrategy.
For containers in a task with the bridge
network mode, the Amazon ECS agent finds open host ports from the default ephemeral range and passes it to docker to bind them to the container ports.
The containerPortRange
valid values are between 1 and\n\t\t\t\t\t65535.
A port can only be included in one port mapping per container.
\nYou cannot specify overlapping port ranges.
\nThe first port in the range must be less than last port in the range.
\nDocker recommends that you turn off the docker-proxy in the Docker daemon config file when you have a large number of ports.
\nFor more information, see Issue #11185 on the Github website.
\nFor information about how to turn off the docker-proxy in the Docker daemon config file, see Docker daemon in the Amazon ECS Developer Guide.
\nYou can call \n DescribeTasks
\n to view the hostPortRange
which\n\t\t\tare the host ports that are bound to the container ports.
The port number range on the host that's used with the network binding. This is assigned is\n\t\t\tassigned by Docker and delivered by the Amazon ECS agent.
" - } } }, "traits": { @@ -8144,7 +8104,7 @@ "awsvpcConfiguration": { "target": "com.amazonaws.ecs#AwsVpcConfiguration", "traits": { - "smithy.api#documentation": "The VPC subnets and security groups that are associated with a task.
\nAll specified subnets and security groups must be from the same VPC.
\nThe VPC subnets and security groups that are associated with a task.
\n\t\tAll specified subnets and security groups must be from the same VPC.
\n\t\tAn object representing a constraint on task placement. For more information, see\n\t\t\t\tTask placement constraints in the\n\t\t\tAmazon Elastic Container Service Developer Guide.
\nIf you're using the Fargate launch type, task placement constraints\n\t\t\t\taren't supported.
\nAn object representing a constraint on task placement. For more information, see\n\t\t\t\tTask placement constraints in the\n\t\t\tAmazon Elastic Container Service Developer Guide.
\n\t\tIf you're using the Fargate launch type, task placement constraints\n\t\t\t\taren't supported.
\n\t\tThe port number on the container that's bound to the user-specified or automatically\n\t\t\tassigned host port.
\nIf you use containers in a task with the awsvpc
or host
\n\t\t\tnetwork mode, specify the exposed ports using containerPort
.
If you use containers in a task with the bridge
network mode and you\n\t\t\tspecify a container port and not a host port, your container automatically receives a\n\t\t\thost port in the ephemeral port range. For more information, see hostPort
.\n\t\t\tPort mappings that are automatically assigned in this way do not count toward the 100\n\t\t\treserved ports limit of a container instance.
The port number on the container that's bound to the user-specified or automatically\n\t\t\tassigned host port.
\n\t\tIf you use containers in a task with the awsvpc
or host
\n\t\t\tnetwork mode, specify the exposed ports using containerPort
.
If you use containers in a task with the bridge
network mode and you\n\t\t\tspecify a container port and not a host port, your container automatically receives a\n\t\t\thost port in the ephemeral port range. For more information, see hostPort
.\n\t\t\tPort mappings that are automatically assigned in this way do not count toward the 100\n\t\t\treserved ports limit of a container instance.
The port number on the container instance to reserve for your container.
\nIf you specify a containerPortRange
, leave this field empty and the value of\n\t\t\tthe hostPort
is set as follows:
For containers in a task with the awsvpc
network mode, the\n\t\t\t\t\t\thostPort
is set to the same value as the\n\t\t\t\t\t\tcontainerPort
. This is a static mapping strategy.
For containers in a task with the bridge
network mode, the Amazon ECS\n\t\t\t\t\tagent finds open ports on the host and automaticaly binds them to the container\n\t\t\t\t\tports. This is a dynamic mapping strategy.
If you use containers in a task with the awsvpc
or host
\n\t\t\tnetwork mode, the hostPort
can either be left blank or set to the same\n\t\t\tvalue as the containerPort
.
If you use containers in a task with the bridge
network mode, you can\n\t\t\tspecify a non-reserved host port for your container port mapping, or you can omit the\n\t\t\t\thostPort
(or set it to 0
) while specifying a\n\t\t\t\tcontainerPort
and your container automatically receives a port in the\n\t\t\tephemeral port range for your container instance operating system and Docker\n\t\t\tversion.
The default ephemeral port range for Docker version 1.6.0 and later is listed on the\n\t\t\tinstance under /proc/sys/net/ipv4/ip_local_port_range
. If this kernel\n\t\t\tparameter is unavailable, the default ephemeral port range from 49153 through 65535 is\n\t\t\tused. Do not attempt to specify a host port in the ephemeral port range as these are\n\t\t\treserved for automatic assignment. In general, ports below 32768 are outside of the\n\t\t\tephemeral port range.
The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the\n\t\t\tAmazon ECS container agent ports 51678-51680. Any host port that was previously specified in\n\t\t\ta running task is also reserved while the task is running. That is, after a task stops,\n\t\t\tthe host port is released. The current reserved ports are displayed in the\n\t\t\t\tremainingResources
of DescribeContainerInstances\n\t\t\toutput. A container instance can have up to 100 reserved ports at a time. This number\n\t\t\tincludes the default reserved ports. Automatically assigned ports aren't included in the\n\t\t\t100 reserved ports quota.
The port number on the container instance to reserve for your container.
\n\t\tIf you use containers in a task with the awsvpc
or host
\n\t\t\tnetwork mode, the hostPort
can either be left blank or set to the same\n\t\t\tvalue as the containerPort
.
If you use containers in a task with the bridge
network mode, you can\n\t\t\tspecify a non-reserved host port for your container port mapping, or you can omit the\n\t\t\t\thostPort
(or set it to 0
) while specifying a\n\t\t\t\tcontainerPort
and your container automatically receives a port in the\n\t\t\tephemeral port range for your container instance operating system and Docker\n\t\t\tversion.
The default ephemeral port range for Docker version 1.6.0 and later is listed on the\n\t\t\tinstance under /proc/sys/net/ipv4/ip_local_port_range
. If this kernel\n\t\t\tparameter is unavailable, the default ephemeral port range from 49153 through 65535 is\n\t\t\tused. Do not attempt to specify a host port in the ephemeral port range as these are\n\t\t\treserved for automatic assignment. In general, ports below 32768 are outside of the\n\t\t\tephemeral port range.
The default reserved ports are 22 for SSH, the Docker ports 2375 and 2376, and the\n\t\t\tAmazon ECS container agent ports 51678-51680. Any host port that was previously specified in\n\t\t\ta running task is also reserved while the task is running. That is, after a task stops,\n\t\t\tthe host port is released. The current reserved ports are displayed in the\n\t\t\t\tremainingResources
of DescribeContainerInstances\n\t\t\toutput. A container instance can have up to 100 reserved ports at a time. This number\n\t\t\tincludes the default reserved ports. Automatically assigned ports aren't included in the\n\t\t\t100 reserved ports quota.
The name that's used for the port mapping. This parameter only applies to\n\t\t\tService Connect. This parameter is the name that you use in the\n\t\t\t\tserviceConnectConfiguration
of a service. The name can include up to 64\n\t\t\tcharacters. The characters can include lowercase letters, numbers, underscores (_), and\n\t\t\thyphens (-). The name can't start with a hyphen.
For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "The name that's used for the port mapping. This parameter only applies to\n\t\t\tService Connect. This parameter is the name that you use in the\n\t\t\t\tserviceConnectConfiguration
of a service. The name can include up to 64\n\t\t\tcharacters. The characters can include lowercase letters, numbers, underscores (_), and\n\t\t\thyphens (-). The name can't start with a hyphen.
For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" } }, "appProtocol": { "target": "com.amazonaws.ecs#ApplicationProtocol", "traits": { - "smithy.api#documentation": "The application protocol that's used for the port mapping. This parameter only applies\n\t\t\tto Service Connect. We recommend that you set this parameter to be consistent with the\n\t\t\tprotocol that your application uses. If you set this parameter, Amazon ECS adds\n\t\t\tprotocol-specific connection handling to the Service Connect proxy. If you set this\n\t\t\tparameter, Amazon ECS adds protocol-specific telemetry in the Amazon ECS console and CloudWatch.
\nIf you don't set a value for this parameter, then TCP is used. However, Amazon ECS doesn't\n\t\t\tadd protocol-specific telemetry for TCP.
\nTasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" - } - }, - "containerPortRange": { - "target": "com.amazonaws.ecs#String", - "traits": { - "smithy.api#documentation": "The port number range on the container that's bound to the dynamically mapped host port range.
\nThe following rules apply when you specify a containerPortRange
:
You must use either the bridge
network mode or the awsvpc
\n\t\t\t\t\tnetwork mode.
This parameter is available for both the EC2 and Fargate launch types.
\nThis parameter is available for both the Linux and Windows operating systems.
\nThe container instance must have at least version 1.67.0 of the container agent\n\t\t\t\t\tand at least version 1.67.0-1 of the ecs-init
package
You can specify a maximum of 100 port ranges per container.
\nYou do not specify a hostPortRange
. The value of the hostPortRange
is set\n\t\t\t\t\tas follows:
For containers in a task with the awsvpc
network mode,\n\t\t\t\t\t\t\tthe hostPort
is set to the same value as the\n\t\t\t\t\t\t\t\tcontainerPort
. This is a static mapping\n\t\t\t\t\t\t\tstrategy.
For containers in a task with the bridge
network mode, the Amazon ECS agent finds open host ports from the default ephemeral range and passes it to docker to bind them to the container ports.
The containerPortRange
valid values are between 1 and\n\t\t\t\t\t65535.
A port can only be included in one port mapping per container.
\nYou cannot specify overlapping port ranges.
\nThe first port in the range must be less than last port in the range.
\nDocker recommends that you turn off the docker-proxy in the Docker daemon config file when you have a large number of ports.
\nFor more information, see Issue #11185 on the Github website.
\nFor information about how to turn off the docker-proxy in the Docker daemon config file, see Docker daemon in the Amazon ECS Developer Guide.
\nYou can call \n DescribeTasks
\n to view the hostPortRange
which\n\t\t\tare the host ports that are bound to the container ports.
The application protocol that's used for the port mapping. This parameter only applies\n\t\t\tto Service Connect. We recommend that you set this parameter to be consistent with the\n\t\t\tprotocol that your application uses. If you set this parameter, Amazon ECS adds\n\t\t\tprotocol-specific connection handling to the Service Connect proxy. If you set this\n\t\t\tparameter, Amazon ECS adds protocol-specific telemetry in the Amazon ECS console and CloudWatch.
\n\t\tIf you don't set a value for this parameter, then TCP is used. However, Amazon ECS doesn't\n\t\t\tadd protocol-specific telemetry for TCP.
\n\t\tTasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" } } }, "traits": { - "smithy.api#documentation": "Port mappings allow containers to access ports on the host container instance to send\n\t\t\tor receive traffic. Port mappings are specified as part of the container\n\t\t\tdefinition.
\nIf you use containers in a task with the awsvpc
or host
\n\t\t\tnetwork mode, specify the exposed ports using containerPort
. The\n\t\t\t\thostPort
can be left blank or it must be the same value as the\n\t\t\t\tcontainerPort
.
You can't expose the same container port for multiple protocols. If you attempt\n\t\t\t\tthis, an error is returned.
\nAfter a task reaches the RUNNING
status, manual and automatic host and\n\t\t\tcontainer port assignments are visible in the networkBindings
section of\n\t\t\t\tDescribeTasks API responses.
Port mappings allow containers to access ports on the host container instance to send\n\t\t\tor receive traffic. Port mappings are specified as part of the container\n\t\t\tdefinition.
\n\t\tIf you use containers in a task with the awsvpc
or host
\n\t\t\tnetwork mode, specify the exposed ports using containerPort
. The\n\t\t\t\thostPort
can be left blank or it must be the same value as the\n\t\t\t\tcontainerPort
.
You can't expose the same container port for multiple protocols. If you attempt\n\t\t\t\tthis, an error is returned.
\n\t\tAfter a task reaches the RUNNING
status, manual and automatic host and\n\t\t\tcontainer port assignments are visible in the networkBindings
section of\n\t\t\t\tDescribeTasks API responses.
The set of network configuration parameters to provide the Container Network Interface\n\t\t\t(CNI) plugin, specified as key-value pairs.
\n\n IgnoredUID
- (Required) The user ID (UID) of the proxy\n\t\t\t\t\tcontainer as defined by the user
parameter in a container\n\t\t\t\t\tdefinition. This is used to ensure the proxy ignores its own traffic. If\n\t\t\t\t\t\tIgnoredGID
is specified, this field can be empty.
\n IgnoredGID
- (Required) The group ID (GID) of the proxy\n\t\t\t\t\tcontainer as defined by the user
parameter in a container\n\t\t\t\t\tdefinition. This is used to ensure the proxy ignores its own traffic. If\n\t\t\t\t\t\tIgnoredUID
is specified, this field can be empty.
\n AppPorts
- (Required) The list of ports that the\n\t\t\t\t\tapplication uses. Network traffic to these ports is forwarded to the\n\t\t\t\t\t\tProxyIngressPort
and ProxyEgressPort
.
\n ProxyIngressPort
- (Required) Specifies the port that\n\t\t\t\t\tincoming traffic to the AppPorts
is directed to.
\n ProxyEgressPort
- (Required) Specifies the port that\n\t\t\t\t\toutgoing traffic from the AppPorts
is directed to.
\n EgressIgnoredPorts
- (Required) The egress traffic going to\n\t\t\t\t\tthe specified ports is ignored and not redirected to the\n\t\t\t\t\t\tProxyEgressPort
. It can be an empty list.
\n EgressIgnoredIPs
- (Required) The egress traffic going to\n\t\t\t\t\tthe specified IP addresses is ignored and not redirected to the\n\t\t\t\t\t\tProxyEgressPort
. It can be an empty list.
The set of network configuration parameters to provide the Container Network Interface\n\t\t\t(CNI) plugin, specified as key-value pairs.
\n\t\t\n IgnoredUID
- (Required) The user ID (UID) of the proxy\n\t\t\t\t\tcontainer as defined by the user
parameter in a container\n\t\t\t\t\tdefinition. This is used to ensure the proxy ignores its own traffic. If\n\t\t\t\t\t\tIgnoredGID
is specified, this field can be empty.
\n IgnoredGID
- (Required) The group ID (GID) of the proxy\n\t\t\t\t\tcontainer as defined by the user
parameter in a container\n\t\t\t\t\tdefinition. This is used to ensure the proxy ignores its own traffic. If\n\t\t\t\t\t\tIgnoredUID
is specified, this field can be empty.
\n AppPorts
- (Required) The list of ports that the\n\t\t\t\t\tapplication uses. Network traffic to these ports is forwarded to the\n\t\t\t\t\t\tProxyIngressPort
and ProxyEgressPort
.
\n ProxyIngressPort
- (Required) Specifies the port that\n\t\t\t\t\tincoming traffic to the AppPorts
is directed to.
\n ProxyEgressPort
- (Required) Specifies the port that\n\t\t\t\t\toutgoing traffic from the AppPorts
is directed to.
\n EgressIgnoredPorts
- (Required) The egress traffic going to\n\t\t\t\t\tthe specified ports is ignored and not redirected to the\n\t\t\t\t\t\tProxyEgressPort
. It can be an empty list.
\n EgressIgnoredIPs
- (Required) The egress traffic going to\n\t\t\t\t\tthe specified IP addresses is ignored and not redirected to the\n\t\t\t\t\t\tProxyEgressPort
. It can be an empty list.
The configuration details for the App Mesh proxy.
\nFor tasks that use the EC2 launch type, the container instances require\n\t\t\tat least version 1.26.0 of the container agent and at least version 1.26.0-1 of the\n\t\t\t\tecs-init
package to use a proxy configuration. If your container\n\t\t\tinstances are launched from the Amazon ECS optimized AMI version 20190301
or\n\t\t\tlater, then they contain the required versions of the container agent and\n\t\t\t\tecs-init
. For more information, see Amazon ECS-optimized Linux AMI\n
The configuration details for the App Mesh proxy.
\n\t\tFor tasks that use the EC2 launch type, the container instances require\n\t\t\tat least version 1.26.0 of the container agent and at least version 1.26.0-1 of the\n\t\t\t\tecs-init
package to use a proxy configuration. If your container\n\t\t\tinstances are launched from the Amazon ECS optimized AMI version 20190301
or\n\t\t\tlater, then they contain the required versions of the container agent and\n\t\t\t\tecs-init
. For more information, see Amazon ECS-optimized Linux AMI\n\t\t
Modifies an account setting. Account settings are set on a per-Region basis.
\nIf you change the account setting for the root user, the default settings for all of\n\t\t\tthe IAM users and roles that no individual account setting was specified are reset for.\n\t\t\tFor more information, see Account\n\t\t\t\tSettings in the Amazon Elastic Container Service Developer Guide.
\nWhen serviceLongArnFormat
, taskLongArnFormat
, or\n\t\t\t\tcontainerInstanceLongArnFormat
are specified, the Amazon Resource Name\n\t\t\t(ARN) and resource ID format of the resource type for a specified IAM user, IAM role, or\n\t\t\tthe root user for an account is affected. The opt-in and opt-out account setting must be\n\t\t\tset for each Amazon ECS resource separately. The ARN and resource ID format of a resource\n\t\t\tis defined by the opt-in status of the IAM user or role that created the resource. You\n\t\t\tmust turn on this setting to use Amazon ECS features such as resource tagging.
When awsvpcTrunking
is specified, the elastic network interface (ENI)\n\t\t\tlimit for any new container instances that support the feature is changed. If\n\t\t\t\tawsvpcTrunking
is enabled, any new container instances that support the\n\t\t\tfeature are launched have the increased ENI limits available to them. For more\n\t\t\tinformation, see Elastic Network\n\t\t\t\tInterface Trunking in the Amazon Elastic Container Service Developer Guide.
When containerInsights
is specified, the default setting indicating\n\t\t\twhether CloudWatch Container Insights is enabled for your clusters is changed. If\n\t\t\t\tcontainerInsights
is enabled, any new clusters that are created will\n\t\t\thave Container Insights enabled unless you disable it during cluster creation. For more\n\t\t\tinformation, see CloudWatch\n\t\t\t\tContainer Insights in the Amazon Elastic Container Service Developer Guide.
Modifies an account setting. Account settings are set on a per-Region basis.
\n\t\tIf you change the account setting for the root user, the default settings for all of\n\t\t\tthe IAM users and roles that no individual account setting was specified are reset for.\n\t\t\tFor more information, see Account\n\t\t\t\tSettings in the Amazon Elastic Container Service Developer Guide.
\n\t\tWhen serviceLongArnFormat
, taskLongArnFormat
, or\n\t\t\t\tcontainerInstanceLongArnFormat
are specified, the Amazon Resource Name\n\t\t\t(ARN) and resource ID format of the resource type for a specified IAM user, IAM role, or\n\t\t\tthe root user for an account is affected. The opt-in and opt-out account setting must be\n\t\t\tset for each Amazon ECS resource separately. The ARN and resource ID format of a resource\n\t\t\tis defined by the opt-in status of the IAM user or role that created the resource. You\n\t\t\tmust turn on this setting to use Amazon ECS features such as resource tagging.
When awsvpcTrunking
is specified, the elastic network interface (ENI)\n\t\t\tlimit for any new container instances that support the feature is changed. If\n\t\t\t\tawsvpcTrunking
is enabled, any new container instances that support the\n\t\t\tfeature are launched have the increased ENI limits available to them. For more\n\t\t\tinformation, see Elastic Network\n\t\t\t\tInterface Trunking in the Amazon Elastic Container Service Developer Guide.
When containerInsights
is specified, the default setting indicating\n\t\t\twhether CloudWatch Container Insights is enabled for your clusters is changed. If\n\t\t\t\tcontainerInsights
is enabled, any new clusters that are created will\n\t\t\thave Container Insights enabled unless you disable it during cluster creation. For more\n\t\t\tinformation, see CloudWatch\n\t\t\t\tContainer Insights in the Amazon Elastic Container Service Developer Guide.
The resource name for which to modify the account setting. If\n\t\t\t\tserviceLongArnFormat
is specified, the ARN for your Amazon ECS services is\n\t\t\taffected. If taskLongArnFormat
is specified, the ARN and resource ID for\n\t\t\tyour Amazon ECS tasks is affected. If containerInstanceLongArnFormat
is\n\t\t\tspecified, the ARN and resource ID for your Amazon ECS container instances is affected. If\n\t\t\t\tawsvpcTrunking
is specified, the ENI limit for your Amazon ECS container\n\t\t\tinstances is affected. If containerInsights
is specified, the default\n\t\t\tsetting for CloudWatch Container Insights for your clusters is affected.
Fargate is transitioning from task count-based quotas to vCPU-based quotas. You can\n\t\t\tset the name to fargateVCPULimit
to opt in or opt out of the vCPU-based\n\t\t\tquotas. For information about the opt in timeline, see Fargate vCPU-based quotas timeline in the\n\t\t\t\tAmazon ECS Developer Guide.
The resource name for which to modify the account setting. If\n\t\t\t\tserviceLongArnFormat
is specified, the ARN for your Amazon ECS services is\n\t\t\taffected. If taskLongArnFormat
is specified, the ARN and resource ID for\n\t\t\tyour Amazon ECS tasks is affected. If containerInstanceLongArnFormat
is\n\t\t\tspecified, the ARN and resource ID for your Amazon ECS container instances is affected. If\n\t\t\t\tawsvpcTrunking
is specified, the ENI limit for your Amazon ECS container\n\t\t\tinstances is affected. If containerInsights
is specified, the default\n\t\t\tsetting for CloudWatch Container Insights for your clusters is affected.
Fargate is transitioning from task count-based quotas to vCPU-based quotas. You can\n\t\t\tset the name to fargateVCPULimit
to opt in or opt out of the vCPU-based\n\t\t\tquotas. For information about the opt in timeline, see Fargate vCPU-based quotas timeline in the\n\t\t\t\tAmazon ECS Developer Guide.
The ARN of the principal, which can be an IAM user, IAM role, or the root user. If\n\t\t\tyou specify the root user, it modifies the account setting for all IAM users, IAM roles,\n\t\t\tand the root user of the account unless an IAM user or role explicitly overrides these\n\t\t\tsettings. If this field is omitted, the setting is changed only for the authenticated\n\t\t\tuser.
\nFederated users assume the account setting of the root user and can't have\n\t\t\t\texplicit account settings set for them.
\nThe ARN of the principal, which can be an IAM user, IAM role, or the root user. If\n\t\t\tyou specify the root user, it modifies the account setting for all IAM users, IAM roles,\n\t\t\tand the root user of the account unless an IAM user or role explicitly overrides these\n\t\t\tsettings. If this field is omitted, the setting is changed only for the authenticated\n\t\t\tuser.
\n\t\tFederated users assume the account setting of the root user and can't have\n\t\t\t\texplicit account settings set for them.
\n\t\tModifies the available capacity providers and the default capacity provider strategy\n\t\t\tfor a cluster.
\nYou must specify both the available capacity providers and a default capacity provider\n\t\t\tstrategy for the cluster. If the specified cluster has existing capacity providers\n\t\t\tassociated with it, you must specify all existing capacity providers in addition to any\n\t\t\tnew ones you want to add. Any existing capacity providers that are associated with a\n\t\t\tcluster that are omitted from a PutClusterCapacityProviders API call\n\t\t\twill be disassociated with the cluster. You can only disassociate an existing capacity\n\t\t\tprovider from a cluster if it's not being used by any existing tasks.
\nWhen creating a service or running a task on a cluster, if no capacity provider or\n\t\t\tlaunch type is specified, then the cluster's default capacity provider strategy is used.\n\t\t\tWe recommend that you define a default capacity provider strategy for your cluster.\n\t\t\tHowever, you must specify an empty array ([]
) to bypass defining a default\n\t\t\tstrategy.
Modifies the available capacity providers and the default capacity provider strategy\n\t\t\tfor a cluster.
\n\t\tYou must specify both the available capacity providers and a default capacity provider\n\t\t\tstrategy for the cluster. If the specified cluster has existing capacity providers\n\t\t\tassociated with it, you must specify all existing capacity providers in addition to any\n\t\t\tnew ones you want to add. Any existing capacity providers that are associated with a\n\t\t\tcluster that are omitted from a PutClusterCapacityProviders API call\n\t\t\twill be disassociated with the cluster. You can only disassociate an existing capacity\n\t\t\tprovider from a cluster if it's not being used by any existing tasks.
\n\t\tWhen creating a service or running a task on a cluster, if no capacity provider or\n\t\t\tlaunch type is specified, then the cluster's default capacity provider strategy is used.\n\t\t\tWe recommend that you define a default capacity provider strategy for your cluster.\n\t\t\tHowever, you must specify an empty array ([]
) to bypass defining a default\n\t\t\tstrategy.
The name of one or more capacity providers to associate with the cluster.
\nIf specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.
\nTo use a Fargate capacity provider, specify either the FARGATE
or\n\t\t\t\tFARGATE_SPOT
capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be\n\t\t\tused.
The name of one or more capacity providers to associate with the cluster.
\n\t\tIf specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.
\n\t\tTo use a Fargate capacity provider, specify either the FARGATE
or\n\t\t\t\tFARGATE_SPOT
capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be\n\t\t\tused.
The capacity provider strategy to use by default for the cluster.
\nWhen creating a service or running a task on a cluster, if no capacity provider or\n\t\t\tlaunch type is specified then the default capacity provider strategy for the cluster is\n\t\t\tused.
\nA capacity provider strategy consists of one or more capacity providers along with the\n\t\t\t\tbase
and weight
to assign to them. A capacity provider\n\t\t\tmust be associated with the cluster to be used in a capacity provider strategy. The\n\t\t\t\tPutClusterCapacityProviders API is used to associate a capacity\n\t\t\tprovider with a cluster. Only capacity providers with an ACTIVE
or\n\t\t\t\tUPDATING
status can be used.
If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.
\nTo use a Fargate capacity provider, specify either the FARGATE
or\n\t\t\t\tFARGATE_SPOT
capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be\n\t\t\tused.
The capacity provider strategy to use by default for the cluster.
\n\t\tWhen creating a service or running a task on a cluster, if no capacity provider or\n\t\t\tlaunch type is specified then the default capacity provider strategy for the cluster is\n\t\t\tused.
\n\t\tA capacity provider strategy consists of one or more capacity providers along with the\n\t\t\t\tbase
and weight
to assign to them. A capacity provider\n\t\t\tmust be associated with the cluster to be used in a capacity provider strategy. The\n\t\t\t\tPutClusterCapacityProviders API is used to associate a capacity\n\t\t\tprovider with a cluster. Only capacity providers with an ACTIVE
or\n\t\t\t\tUPDATING
status can be used.
If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.
\n\t\tTo use a Fargate capacity provider, specify either the FARGATE
or\n\t\t\t\tFARGATE_SPOT
capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be\n\t\t\tused.
This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
\nRegisters an EC2 instance into the specified cluster. This instance becomes available\n\t\t\tto place containers on.
" + "smithy.api#documentation": "This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
\nRegisters an EC2 instance into the specified cluster. This instance becomes available\n\t\t\tto place containers on.
" } }, "com.amazonaws.ecs#RegisterContainerInstanceRequest": { @@ -8880,13 +8834,13 @@ "instanceIdentityDocument": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The instance identity document for the EC2 instance to register. This document can be\n\t\t\tfound by running the following command from the instance: curl\n\t\t\t\thttp://169.254.169.254/latest/dynamic/instance-identity/document/
\n
The instance identity document for the EC2 instance to register. This document can be\n\t\t\tfound by running the following command from the instance: curl\n\t\t\t\thttp://169.254.169.254/latest/dynamic/instance-identity/document/
\n\t\t
The instance identity document signature for the EC2 instance to register. This\n\t\t\tsignature can be found by running the following command from the instance: curl\n\t\t\t\thttp://169.254.169.254/latest/dynamic/instance-identity/signature/
\n
The instance identity document signature for the EC2 instance to register. This\n\t\t\tsignature can be found by running the following command from the instance: curl\n\t\t\t\thttp://169.254.169.254/latest/dynamic/instance-identity/signature/
\n\t\t
The metadata that you apply to the container instance to help you categorize and\n\t\t\torganize them. Each tag consists of a key and an optional value. You define both.
\nThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The metadata that you apply to the container instance to help you categorize and\n\t\t\torganize them. Each tag consists of a key and an optional value. You define both.
\n\t\tThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
Registers a new task definition from the supplied family
and\n\t\t\t\tcontainerDefinitions
. Optionally, you can add data volumes to your\n\t\t\tcontainers with the volumes
parameter. For more information about task\n\t\t\tdefinition parameters and defaults, see Amazon ECS Task\n\t\t\t\tDefinitions in the Amazon Elastic Container Service Developer Guide.
You can specify an IAM role for your task with the taskRoleArn
parameter.\n\t\t\tWhen you specify an IAM role for a task, its containers can then use the latest versions\n\t\t\tof the CLI or SDKs to make API requests to the Amazon Web Services services that are specified in\n\t\t\tthe IAM policy that's associated with the role. For more information, see IAM\n\t\t\t\tRoles for Tasks in the Amazon Elastic Container Service Developer Guide.
You can specify a Docker networking mode for the containers in your task definition\n\t\t\twith the networkMode
parameter. The available network modes correspond to\n\t\t\tthose described in Network\n\t\t\t\tsettings in the Docker run reference. If you specify the awsvpc
\n\t\t\tnetwork mode, the task is allocated an elastic network interface, and you must specify a\n\t\t\t\tNetworkConfiguration when you create a service or run a task with\n\t\t\tthe task definition. For more information, see Task Networking\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
Registers a new task definition from the supplied family
and\n\t\t\t\tcontainerDefinitions
. Optionally, you can add data volumes to your\n\t\t\tcontainers with the volumes
parameter. For more information about task\n\t\t\tdefinition parameters and defaults, see Amazon ECS Task\n\t\t\t\tDefinitions in the Amazon Elastic Container Service Developer Guide.
You can specify an IAM role for your task with the taskRoleArn
parameter.\n\t\t\tWhen you specify an IAM role for a task, its containers can then use the latest versions\n\t\t\tof the CLI or SDKs to make API requests to the Amazon Web Services services that are specified in\n\t\t\tthe IAM policy that's associated with the role. For more information, see IAM\n\t\t\t\tRoles for Tasks in the Amazon Elastic Container Service Developer Guide.
You can specify a Docker networking mode for the containers in your task definition\n\t\t\twith the networkMode
parameter. The available network modes correspond to\n\t\t\tthose described in Network\n\t\t\t\tsettings in the Docker run reference. If you specify the awsvpc
\n\t\t\tnetwork mode, the task is allocated an elastic network interface, and you must specify a\n\t\t\t\tNetworkConfiguration when you create a service or run a task with\n\t\t\tthe task definition. For more information, see Task Networking\n\t\t\tin the Amazon Elastic Container Service Developer Guide.
The Docker networking mode to use for the containers in the task. The valid values are\n none
, bridge
, awsvpc
, and host
.\n If no network mode is specified, the default is bridge
.
For Amazon ECS tasks on Fargate, the awsvpc
network mode is required. \n For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances,
or awsvpc
can be used. If the network\n mode is set to none
, you cannot specify port mappings in your container\n definitions, and the tasks containers do not have external connectivity. The\n host
and awsvpc
network modes offer the highest networking\n performance for containers because they use the EC2 network stack instead of the\n virtualized network stack provided by the bridge
mode.
With the host
and awsvpc
network modes, exposed container\n ports are mapped directly to the corresponding host port (for the host
\n network mode) or the attached elastic network interface port (for the\n awsvpc
network mode), so you cannot take advantage of dynamic host port\n mappings.
When using the host
network mode, you should not run\n containers using the root user (UID 0). It is considered best practice\n to use a non-root user.
If the network mode is awsvpc
, the task is allocated an elastic network\n interface, and you must specify a NetworkConfiguration value when you create\n a service or run a task with the task definition. For more information, see Task Networking in the\n Amazon Elastic Container Service Developer Guide.
If the network mode is host
, you cannot run multiple instantiations of the\n same task on a single container instance when port mappings are used.
For more information, see Network\n settings in the Docker run reference.
" + "smithy.api#documentation": "The Docker networking mode to use for the containers in the task. The valid values are\n none
, bridge
, awsvpc
, and host
.\n If no network mode is specified, the default is bridge
.
For Amazon ECS tasks on Fargate, the awsvpc
network mode is required. \n For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances,
or awsvpc
can be used. If the network\n mode is set to none
, you cannot specify port mappings in your container\n definitions, and the tasks containers do not have external connectivity. The\n host
and awsvpc
network modes offer the highest networking\n performance for containers because they use the EC2 network stack instead of the\n virtualized network stack provided by the bridge
mode.
With the host
and awsvpc
network modes, exposed container\n ports are mapped directly to the corresponding host port (for the host
\n network mode) or the attached elastic network interface port (for the\n awsvpc
network mode), so you cannot take advantage of dynamic host port\n mappings.
When using the host
network mode, you should not run\n containers using the root user (UID 0). It is considered best practice\n to use a non-root user.
If the network mode is awsvpc
, the task is allocated an elastic network\n interface, and you must specify a NetworkConfiguration value when you create\n a service or run a task with the task definition. For more information, see Task Networking in the\n Amazon Elastic Container Service Developer Guide.
If the network mode is host
, you cannot run multiple instantiations of the\n same task on a single container instance when port mappings are used.
For more information, see Network\n settings in the Docker run reference.
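As a rough illustration of the awsvpc requirement described above, a task launch through the generated AWS SDK for Kotlin client might look like the sketch below. The cluster, task definition, subnet, and security group identifiers are placeholders, and the DSL builder names are assumed to mirror the member names in this model.

```kotlin
import aws.sdk.kotlin.services.ecs.EcsClient
import aws.sdk.kotlin.services.ecs.model.*

// Sketch only: identifiers are placeholders; builder names assume the generated
// Kotlin DSL mirrors the Smithy member names shown in this model.
suspend fun runAwsvpcTask(ecs: EcsClient) {
    ecs.runTask {
        cluster = "demo-cluster"                   // placeholder cluster name
        taskDefinition = "demo-task:1"             // placeholder family:revision
        launchType = LaunchType.Fargate            // Fargate requires the awsvpc network mode
        // awsvpc tasks receive an elastic network interface, so a NetworkConfiguration is required
        networkConfiguration = NetworkConfiguration {
            awsvpcConfiguration = AwsVpcConfiguration {
                subnets = listOf("subnet-0123456789abcdef0")
                securityGroups = listOf("sg-0123456789abcdef0")
                assignPublicIp = AssignPublicIp.Enabled
            }
        }
    }
}
```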
" } }, "containerDefinitions": { @@ -9017,37 +8971,37 @@ "cpu": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The number of CPU units used by the task. It can be expressed as an integer using CPU\n\t\t\tunits (for example, 1024
) or as a string using vCPUs (for example, 1\n\t\t\t\tvCPU
or 1 vcpu
) in a task definition. String values are\n\t\t\tconverted to an integer indicating the CPU units when the task definition is\n\t\t\tregistered.
Task-level CPU and memory parameters are ignored for Windows containers. We\n\t\t\t\trecommend specifying container-level resources for Windows containers.
\nIf you're using the EC2 launch type, this field is optional. Supported\n\t\t\tvalues are between 128
CPU units (0.125
vCPUs) and\n\t\t\t\t10240
CPU units (10
vCPUs). If you do not specify a value,\n\t\t\tthe parameter is ignored.
If you're using the Fargate launch type, this field is required and you\n\t\t\tmust use one of the following values, which determines your range of supported values\n\t\t\tfor the memory
parameter:
The CPU units cannot be less than 1 vCPU when you use Windows containers on\n\t\t\tFargate.
\n256 (.25 vCPU) - Available memory
values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)
512 (.5 vCPU) - Available memory
values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)
1024 (1 vCPU) - Available memory
values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)
2048 (2 vCPU) - Available memory
values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)
4096 (4 vCPU) - Available memory
values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)
8192 (8 vCPU) - Available memory
values: 16 GB and 60 GB in 4 GB increments
This option requires Linux platform 1.4.0
or\n later.
16384 (16vCPU) - Available memory
values: 32GB and 120 GB in 8 GB increments
This option requires Linux platform 1.4.0
or\n later.
The number of CPU units used by the task. It can be expressed as an integer using CPU\n\t\t\tunits (for example, 1024
) or as a string using vCPUs (for example, 1\n\t\t\t\tvCPU
or 1 vcpu
) in a task definition. String values are\n\t\t\tconverted to an integer indicating the CPU units when the task definition is\n\t\t\tregistered.
Task-level CPU and memory parameters are ignored for Windows containers. We\n\t\t\t\trecommend specifying container-level resources for Windows containers.
\n\t\tIf you're using the EC2 launch type, this field is optional. Supported\n\t\t\tvalues are between 128
CPU units (0.125
vCPUs) and\n\t\t\t\t10240
CPU units (10
vCPUs). If you do not specify a value,\n\t\t\tthe parameter is ignored.
If you're using the Fargate launch type, this field is required and you\n\t\t\tmust use one of the following values, which determines your range of supported values\n\t\t\tfor the memory
parameter:
The CPU units cannot be less than 1 vCPU when you use Windows containers on\n\t\t\tFargate.
\n\t\t256 (.25 vCPU) - Available memory
values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)
512 (.5 vCPU) - Available memory
values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)
1024 (1 vCPU) - Available memory
values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)
2048 (2 vCPU) - Available memory
values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)
4096 (4 vCPU) - Available memory
values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)
8192 (8 vCPU) - Available memory
values: 16 GB and 60 GB in 4 GB increments
This option requires Linux platform 1.4.0
or\n later.
16384 (16vCPU) - Available memory
values: 32GB and 120 GB in 8 GB increments
This option requires Linux platform 1.4.0
or\n later.
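The CPU and memory pairings listed above can be sketched against the generated Kotlin client as follows. This assumes the DSL mirrors the model's member names; the family, image, and sizing values are only one example of a documented pairing.

```kotlin
import aws.sdk.kotlin.services.ecs.EcsClient
import aws.sdk.kotlin.services.ecs.model.*

// Sketch only: registers a Fargate task definition using one of the supported
// CPU/memory pairings (1024 CPU units with 2048 MiB). Names and image are placeholders.
suspend fun registerFargateSizedTask(ecs: EcsClient) {
    ecs.registerTaskDefinition {
        family = "demo-task"
        requiresCompatibilities = listOf(Compatibility.Fargate)
        networkMode = NetworkMode.Awsvpc
        cpu = "1024"      // 1 vCPU; valid memory values for this size are 2048-8192 MiB
        memory = "2048"   // 2 GB, a supported pairing for 1024 CPU units
        containerDefinitions = listOf(
            ContainerDefinition {
                name = "app"
                image = "public.ecr.aws/docker/library/nginx:latest"
            }
        )
    }
}
```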
The amount of memory (in MiB) used by the task. It can be expressed as an integer\n\t\t\tusing MiB (for example ,1024
) or as a string using GB (for example,\n\t\t\t\t1GB
or 1 GB
) in a task definition. String values are\n\t\t\tconverted to an integer indicating the MiB when the task definition is\n\t\t\tregistered.
Task-level CPU and memory parameters are ignored for Windows containers. We\n\t\t\t\trecommend specifying container-level resources for Windows containers.
\nIf using the EC2 launch type, this field is optional.
\nIf using the Fargate launch type, this field is required and you must\n\t\t\tuse one of the following values. This determines your range of supported values for the\n\t\t\t\tcpu
parameter.
The CPU units cannot be less than 1 vCPU when you use Windows containers on\n\t\t\tFargate.
\n512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu
values: 256 (.25 vCPU)
1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu
values: 512 (.5 vCPU)
2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu
values: 1024 (1 vCPU)
Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu
values: 2048 (2 vCPU)
Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu
values: 4096 (4 vCPU)
Between 16 GB and 60 GB in 4 GB increments - Available cpu
values: 8192 (8 vCPU)
This option requires Linux platform 1.4.0
or\n later.
Between 32GB and 120 GB in 8 GB increments - Available cpu
values: 16384 (16 vCPU)
This option requires Linux platform 1.4.0
or\n later.
The amount of memory (in MiB) used by the task. It can be expressed as an integer\n\t\t\tusing MiB (for example ,1024
) or as a string using GB (for example,\n\t\t\t\t1GB
or 1 GB
) in a task definition. String values are\n\t\t\tconverted to an integer indicating the MiB when the task definition is\n\t\t\tregistered.
Task-level CPU and memory parameters are ignored for Windows containers. We\n\t\t\t\trecommend specifying container-level resources for Windows containers.
\n\t\tIf using the EC2 launch type, this field is optional.
\n\t\tIf using the Fargate launch type, this field is required and you must\n\t\t\tuse one of the following values. This determines your range of supported values for the\n\t\t\t\tcpu
parameter.
The CPU units cannot be less than 1 vCPU when you use Windows containers on\n\t\t\tFargate.
\n\t\t512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu
values: 256 (.25 vCPU)
1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu
values: 512 (.5 vCPU)
2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu
values: 1024 (1 vCPU)
Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu
values: 2048 (2 vCPU)
Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu
values: 4096 (4 vCPU)
Between 16 GB and 60 GB in 4 GB increments - Available cpu
values: 8192 (8 vCPU)
This option requires Linux platform 1.4.0
or\n later.
Between 32GB and 120 GB in 8 GB increments - Available cpu
values: 16384 (16 vCPU)
This option requires Linux platform 1.4.0
or\n later.
The metadata that you apply to the task definition to help you categorize and organize\n\t\t\tthem. Each tag consists of a key and an optional value. You define both of them.
\nThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The metadata that you apply to the task definition to help you categorize and organize\n\t\t\tthem. Each tag consists of a key and an optional value. You define both of them.
\n\t\tThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The process namespace to use for the containers in the task. The valid\n values are host
or task
. If host
\n is specified, then all containers within the tasks that specified the\n host
PID mode on the same container instance share the\n same process namespace with the host Amazon EC2 instance. If task
is\n specified, all containers within the specified task share the same\n process namespace. If no value is specified, the default is a private\n namespace. For more information, see PID settings in the Docker run\n reference.
If the host
PID mode is used, be aware that there is a\n heightened risk of undesired process namespace expose. For more\n information, see Docker\n security.
This parameter is not supported for Windows containers or tasks run on Fargate.
\nThe process namespace to use for the containers in the task. The valid\n values are host
or task
. If host
\n is specified, then all containers within the tasks that specified the\n host
PID mode on the same container instance share the\n same process namespace with the host Amazon EC2 instance. If task
is\n specified, all containers within the specified task share the same\n process namespace. If no value is specified, the default is a private\n namespace. For more information, see PID settings in the Docker run\n reference.
If the host
PID mode is used, be aware that there is a\n heightened risk of undesired process namespace expose. For more\n information, see Docker\n security.
This parameter is not supported for Windows containers or tasks run on Fargate.
\nThe IPC resource namespace to use for the containers in the task. The valid values are\n host
, task
, or none
. If host
is\n specified, then all containers within the tasks that specified the host
IPC\n mode on the same container instance share the same IPC resources with the host Amazon EC2\n instance. If task
is specified, all containers within the specified task\n share the same IPC resources. If none
is specified, then IPC resources\n within the containers of a task are private and not shared with other containers in a\n task or on the container instance. If no value is specified, then the IPC resource\n namespace sharing depends on the Docker daemon setting on the container instance. For\n more information, see IPC\n settings in the Docker run reference.
If the host
IPC mode is used, be aware that there is a heightened risk of\n undesired IPC namespace expose. For more information, see Docker\n security.
If you are setting namespaced kernel parameters using systemControls
for\n the containers in the task, the following will apply to your IPC resource namespace. For\n more information, see System\n Controls in the Amazon Elastic Container Service Developer Guide.
For tasks that use the host
IPC mode, IPC namespace related\n systemControls
are not supported.
For tasks that use the task
IPC mode, IPC namespace related\n systemControls
will apply to all containers within a\n task.
This parameter is not supported for Windows containers or tasks run on Fargate.
\nThe IPC resource namespace to use for the containers in the task. The valid values are\n host
, task
, or none
. If host
is\n specified, then all containers within the tasks that specified the host
IPC\n mode on the same container instance share the same IPC resources with the host Amazon EC2\n instance. If task
is specified, all containers within the specified task\n share the same IPC resources. If none
is specified, then IPC resources\n within the containers of a task are private and not shared with other containers in a\n task or on the container instance. If no value is specified, then the IPC resource\n namespace sharing depends on the Docker daemon setting on the container instance. For\n more information, see IPC\n settings in the Docker run reference.
If the host
IPC mode is used, be aware that there is a heightened risk of\n undesired IPC namespace expose. For more information, see Docker\n security.
If you are setting namespaced kernel parameters using systemControls
for\n the containers in the task, the following will apply to your IPC resource namespace. For\n more information, see System\n Controls in the Amazon Elastic Container Service Developer Guide.
For tasks that use the host
IPC mode, IPC namespace related\n systemControls
are not supported.
For tasks that use the task
IPC mode, IPC namespace related\n systemControls
will apply to all containers within a\n task.
This parameter is not supported for Windows containers or tasks run on Fargate.
\nThe configuration details for the App Mesh proxy.
\nFor tasks hosted on Amazon EC2 instances, the container instances require at least version\n\t\t\t\t1.26.0
of the container agent and at least version\n\t\t\t\t1.26.0-1
of the ecs-init
package to use a proxy\n\t\t\tconfiguration. If your container instances are launched from the Amazon ECS-optimized\n\t\t\tAMI version 20190301
or later, then they contain the required versions of\n\t\t\tthe container agent and ecs-init
. For more information, see Amazon ECS-optimized AMI versions in the Amazon Elastic Container Service Developer Guide.
The configuration details for the App Mesh proxy.
\n\t\tFor tasks hosted on Amazon EC2 instances, the container instances require at least version\n\t\t\t\t1.26.0
of the container agent and at least version\n\t\t\t\t1.26.0-1
of the ecs-init
package to use a proxy\n\t\t\tconfiguration. If your container instances are launched from the Amazon ECS-optimized\n\t\t\tAMI version 20190301
or later, then they contain the required versions of\n\t\t\tthe container agent and ecs-init
. For more information, see Amazon ECS-optimized AMI versions in the Amazon Elastic Container Service Developer Guide.
The amount of ephemeral storage to allocate for the task. This parameter is used to\n\t\t\texpand the total amount of ephemeral storage available, beyond the default amount, for\n\t\t\ttasks hosted on Fargate. For more information, see Fargate task\n\t\t\t\tstorage in the Amazon ECS User Guide for Fargate.
\nThis parameter is only supported for tasks hosted on Fargate using\n\t\t\t\tthe following platform versions:
\nLinux platform version 1.4.0
or later.
The amount of ephemeral storage to allocate for the task. This parameter is used to\n\t\t\texpand the total amount of ephemeral storage available, beyond the default amount, for\n\t\t\ttasks hosted on Fargate. For more information, see Fargate task\n\t\t\t\tstorage in the Amazon ECS User Guide for Fargate.
\n\t\tThis parameter is only supported for tasks hosted on Fargate using\n\t\t\t\tthe following platform versions:
\n\t\t\tLinux platform version 1.4.0
or later.
The operating system that your tasks definitions run on. A platform family is\n\t\t\tspecified only for tasks using the Fargate launch type.
\nWhen you specify a task definition in a service, this value must match the\n\t\t\t\truntimePlatform
value of the service.
The operating system that your tasks definitions run on. A platform family is\n\t\t\tspecified only for tasks using the Fargate launch type.
\n\t\tWhen you specify a task definition in a service, this value must match the\n\t\t\t\truntimePlatform
value of the service.
The Amazon Resource Name (ARN) of the secret containing the private repository\n\t\t\tcredentials.
\nWhen you use the Amazon ECS API, CLI, or Amazon Web Services SDK, if the secret exists in the same\n\t\t\t\tRegion as the task that you're launching then you can use either the full ARN or\n\t\t\t\tthe name of the secret. When you use the Amazon Web Services Management Console, you must specify the full ARN\n\t\t\t\tof the secret.
\nThe Amazon Resource Name (ARN) of the secret containing the private repository\n\t\t\tcredentials.
\n\t\tWhen you use the Amazon ECS API, CLI, or Amazon Web Services SDK, if the secret exists in the same\n\t\t\t\tRegion as the task that you're launching then you can use either the full ARN or\n\t\t\t\tthe name of the secret. When you use the Amazon Web Services Management Console, you must specify the full ARN\n\t\t\t\tof the secret.
\n\t\tThe value for the specified resource type.
\nIf the GPU
type is used, the value is the number of physical\n\t\t\t\tGPUs
the Amazon ECS container agent reserves for the container. The number\n\t\t\tof GPUs that's reserved for all containers in a task can't exceed the number of\n\t\t\tavailable GPUs on the container instance that the task is launched on.
If the InferenceAccelerator
type is used, the value
matches\n\t\t\tthe deviceName
for an InferenceAccelerator specified in a\n\t\t\ttask definition.
The value for the specified resource type.
\n\t\tIf the GPU
type is used, the value is the number of physical\n\t\t\t\tGPUs
the Amazon ECS container agent reserves for the container. The number\n\t\t\tof GPUs that's reserved for all containers in a task can't exceed the number of\n\t\t\tavailable GPUs on the container instance that the task is launched on.
If the InferenceAccelerator
type is used, the value
matches\n\t\t\tthe deviceName
for an InferenceAccelerator specified in a\n\t\t\ttask definition.
Starts a new task using the specified task definition.
\nYou can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places\n\t\t\ttasks using placement constraints and placement strategies. For more information, see\n\t\t\t\tScheduling Tasks in the Amazon Elastic Container Service Developer Guide.
\nAlternatively, you can use StartTask to use your own scheduler or\n\t\t\tplace tasks manually on specific container instances.
\nThe Amazon ECS API follows an eventual consistency model. This is because of the\n\t\t\tdistributed nature of the system supporting the API. This means that the result of an\n\t\t\tAPI command you run that affects your Amazon ECS resources might not be immediately visible\n\t\t\tto all subsequent commands you run. Keep this in mind when you carry out an API command\n\t\t\tthat immediately follows a previous API command.
\nTo manage eventual consistency, you can do the following:
\nConfirm the state of the resource before you run a command to modify it. Run\n\t\t\t\t\tthe DescribeTasks command using an exponential backoff algorithm to ensure that\n\t\t\t\t\tyou allow enough time for the previous command to propagate through the system.\n\t\t\t\t\tTo do this, run the DescribeTasks command repeatedly, starting with a couple of\n\t\t\t\t\tseconds of wait time and increasing gradually up to five minutes of wait\n\t\t\t\t\ttime.
\nAdd wait time between subsequent commands, even if the DescribeTasks command\n\t\t\t\t\treturns an accurate response. Apply an exponential backoff algorithm starting\n\t\t\t\t\twith a couple of seconds of wait time, and increase gradually up to about five\n\t\t\t\t\tminutes of wait time.
\nStarts a new task using the specified task definition.
\n\t\tYou can allow Amazon ECS to place tasks for you, or you can customize how Amazon ECS places\n\t\t\ttasks using placement constraints and placement strategies. For more information, see\n\t\t\t\tScheduling Tasks in the Amazon Elastic Container Service Developer Guide.
\n\t\tAlternatively, you can use StartTask to use your own scheduler or\n\t\t\tplace tasks manually on specific container instances.
\n\t\tThe Amazon ECS API follows an eventual consistency model. This is because of the\n\t\t\tdistributed nature of the system supporting the API. This means that the result of an\n\t\t\tAPI command you run that affects your Amazon ECS resources might not be immediately visible\n\t\t\tto all subsequent commands you run. Keep this in mind when you carry out an API command\n\t\t\tthat immediately follows a previous API command.
\n\t\tTo manage eventual consistency, you can do the following:
\n\t\tConfirm the state of the resource before you run a command to modify it. Run\n\t\t\t\t\tthe DescribeTasks command using an exponential backoff algorithm to ensure that\n\t\t\t\t\tyou allow enough time for the previous command to propagate through the system.\n\t\t\t\t\tTo do this, run the DescribeTasks command repeatedly, starting with a couple of\n\t\t\t\t\tseconds of wait time and increasing gradually up to five minutes of wait\n\t\t\t\t\ttime.
\n\t\t\tAdd wait time between subsequent commands, even if the DescribeTasks command\n\t\t\t\t\treturns an accurate response. Apply an exponential backoff algorithm starting\n\t\t\t\t\twith a couple of seconds of wait time, and increase gradually up to about five\n\t\t\t\t\tminutes of wait time.
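The polling pattern described above can be sketched in Kotlin as follows; the retry count, starting delay, and identifiers are arbitrary placeholders rather than recommended values.

```kotlin
import aws.sdk.kotlin.services.ecs.EcsClient
import kotlinx.coroutines.delay

// Sketch of the backoff pattern described above: poll DescribeTasks with an
// exponentially growing wait, capped at five minutes per attempt.
suspend fun waitForTaskVisible(ecs: EcsClient, clusterName: String, taskArn: String) {
    var waitMillis = 2_000L                       // start with a couple of seconds
    val maxWaitMillis = 300_000L                  // cap each wait at five minutes
    repeat(10) {
        val described = ecs.describeTasks {
            cluster = clusterName
            tasks = listOf(taskArn)
        }
        if (described.tasks?.isNotEmpty() == true) return   // the task is now visible
        delay(waitMillis)
        waitMillis = (waitMillis * 2).coerceAtMost(maxWaitMillis)
    }
}
```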
\n\t\t\tThe capacity provider strategy to use for the task.
\nIf a capacityProviderStrategy
is specified, the launchType
\n\t\t\tparameter must be omitted. If no capacityProviderStrategy
or\n\t\t\t\tlaunchType
is specified, the\n\t\t\t\tdefaultCapacityProviderStrategy
for the cluster is used.
When you use cluster auto scaling, you must specify\n\t\t\t\tcapacityProviderStrategy
and not launchType
.
A capacity provider strategy may contain a maximum of 6 capacity providers.
" + "smithy.api#documentation": "The capacity provider strategy to use for the task.
\n\t\tIf a capacityProviderStrategy
is specified, the launchType
\n\t\t\tparameter must be omitted. If no capacityProviderStrategy
or\n\t\t\t\tlaunchType
is specified, the\n\t\t\t\tdefaultCapacityProviderStrategy
for the cluster is used.
When you use cluster auto scaling, you must specify\n\t\t\t\tcapacityProviderStrategy
and not launchType
.
A capacity provider strategy may contain a maximum of 6 capacity providers.
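A hedged sketch of supplying a capacity provider strategy instead of a launch type through the generated Kotlin client; provider names, weights, and identifiers are placeholders.

```kotlin
import aws.sdk.kotlin.services.ecs.EcsClient
import aws.sdk.kotlin.services.ecs.model.*

// Sketch only: a capacity provider strategy replaces launchType entirely,
// since the two settings are mutually exclusive.
suspend fun runWithCapacityProviders(ecs: EcsClient) {
    ecs.runTask {
        cluster = "demo-cluster"
        taskDefinition = "demo-task"
        capacityProviderStrategy = listOf(
            CapacityProviderStrategyItem {
                capacityProvider = "FARGATE"
                weight = 1
                base = 1
            },
            CapacityProviderStrategyItem {
                capacityProvider = "FARGATE_SPOT"
                weight = 4
            }
        )
        // Fargate capacity providers still require an awsvpc network configuration
        networkConfiguration = NetworkConfiguration {
            awsvpcConfiguration = AwsVpcConfiguration {
                subnets = listOf("subnet-0123456789abcdef0")
            }
        }
    }
}
```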
" } }, "cluster": { @@ -9303,7 +9257,7 @@ "target": "com.amazonaws.ecs#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "Determines whether to use the execute command functionality for the containers in this\n\t\t\ttask. If true
, this enables execute command functionality on all containers\n\t\t\tin the task.
If true
, then the task definition must have a task role, or you must\n\t\t\tprovide one as an override.
Determines whether to use the execute command functionality for the containers in this\n\t\t\ttask. If true
, this enables execute command functionality on all containers\n\t\t\tin the task.
If true
, then the task definition must have a task role, or you must\n\t\t\tprovide one as an override.
The infrastructure to run your standalone task on. For more information, see Amazon ECS\n\t\t\t\tlaunch types in the Amazon Elastic Container Service Developer Guide.
\nThe FARGATE
launch type runs your tasks on Fargate On-Demand\n\t\t\tinfrastructure.
Fargate Spot infrastructure is available for use but a capacity provider\n\t\t\t\tstrategy must be used. For more information, see Fargate capacity providers in the\n\t\t\t\t\tAmazon ECS User Guide for Fargate.
\nThe EC2
launch type runs your tasks on Amazon EC2 instances registered to your\n\t\t\tcluster.
The EXTERNAL
launch type runs your tasks on your on-premises server or\n\t\t\tvirtual machine (VM) capacity registered to your cluster.
A task can use either a launch type or a capacity provider strategy. If a\n\t\t\t\tlaunchType
is specified, the capacityProviderStrategy
\n\t\t\tparameter must be omitted.
When you use cluster auto scaling, you must specify\n\t\t\t\tcapacityProviderStrategy
and not launchType
.
The infrastructure to run your standalone task on. For more information, see Amazon ECS\n\t\t\t\tlaunch types in the Amazon Elastic Container Service Developer Guide.
\n\t\tThe FARGATE
launch type runs your tasks on Fargate On-Demand\n\t\t\tinfrastructure.
Fargate Spot infrastructure is available for use but a capacity provider\n\t\t\t\tstrategy must be used. For more information, see Fargate capacity providers in the\n\t\t\t\t\tAmazon ECS User Guide for Fargate.
\n\t\tThe EC2
launch type runs your tasks on Amazon EC2 instances registered to your\n\t\t\tcluster.
The EXTERNAL
launch type runs your tasks on your on-premises server or\n\t\t\tvirtual machine (VM) capacity registered to your cluster.
A task can use either a launch type or a capacity provider strategy. If a\n\t\t\t\tlaunchType
is specified, the capacityProviderStrategy
\n\t\t\tparameter must be omitted.
When you use cluster auto scaling, you must specify\n\t\t\t\tcapacityProviderStrategy
and not launchType
.
A list of container overrides in JSON format that specify the name of a container in\n\t\t\tthe specified task definition and the overrides it should receive. You can override the\n\t\t\tdefault command for a container (that's specified in the task definition or Docker\n\t\t\timage) with a command
override. You can also override existing environment\n\t\t\tvariables (that are specified in the task definition or Docker image) on a container or\n\t\t\tadd new environment variables to it with an environment
override.
A total of 8192 characters are allowed for overrides. This limit includes the JSON\n\t\t\tformatting characters of the override structure.
" + "smithy.api#documentation": "A list of container overrides in JSON format that specify the name of a container in\n\t\t\tthe specified task definition and the overrides it should receive. You can override the\n\t\t\tdefault command for a container (that's specified in the task definition or Docker\n\t\t\timage) with a command
override. You can also override existing environment\n\t\t\tvariables (that are specified in the task definition or Docker image) on a container or\n\t\t\tadd new environment variables to it with an environment
override.
A total of 8192 characters are allowed for overrides. This limit includes the JSON\n\t\t\tformatting characters of the override structure.
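A sketch of a per-run command and environment override through the generated Kotlin client, assuming the DSL mirrors the model's member names; the container name and values are placeholders.

```kotlin
import aws.sdk.kotlin.services.ecs.EcsClient
import aws.sdk.kotlin.services.ecs.model.*

// Sketch only: override the container command and add an environment variable
// for a single run, without changing the registered task definition.
suspend fun runWithOverrides(ecs: EcsClient) {
    ecs.runTask {
        cluster = "demo-cluster"
        taskDefinition = "demo-task"
        launchType = LaunchType.Ec2
        overrides = TaskOverride {
            containerOverrides = listOf(
                ContainerOverride {
                    name = "app"                                 // container name from the task definition
                    command = listOf("python", "batch_job.py")   // replaces the default command
                    environment = listOf(
                        KeyValuePair { name = "JOB_ID"; value = "1234" }
                    )
                }
            )
        }
    }
}
```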
" } }, "placementConstraints": { @@ -9351,7 +9305,7 @@ "propagateTags": { "target": "com.amazonaws.ecs#PropagateTags", "traits": { - "smithy.api#documentation": "Specifies whether to propagate the tags from the task definition to the task. If no\n\t\t\tvalue is specified, the tags aren't propagated. Tags can only be propagated to the task\n\t\t\tduring task creation. To add tags to a task after task creation, use the TagResource API action.
\nAn error will be received if you specify the SERVICE
option when\n\t\t\t\trunning a task.
Specifies whether to propagate the tags from the task definition to the task. If no\n\t\t\tvalue is specified, the tags aren't propagated. Tags can only be propagated to the task\n\t\t\tduring task creation. To add tags to a task after task creation, use the TagResource API action.
\n\t\tAn error will be received if you specify the SERVICE
option when\n\t\t\t\trunning a task.
An optional tag specified when a task is started. For example, if you automatically\n\t\t\ttrigger a task to run a batch process job, you could apply a unique identifier for that\n\t\t\tjob to your task with the startedBy
parameter. You can then identify which\n\t\t\ttasks belong to that job by filtering the results of a ListTasks call\n\t\t\twith the startedBy
value. Up to 36 letters (uppercase and lowercase),\n\t\t\tnumbers, hyphens (-), and underscores (_) are allowed.
If a task is started by an Amazon ECS service, then the startedBy
parameter\n\t\t\tcontains the deployment ID of the service that starts it.
An optional tag specified when a task is started. For example, if you automatically\n\t\t\ttrigger a task to run a batch process job, you could apply a unique identifier for that\n\t\t\tjob to your task with the startedBy
parameter. You can then identify which\n\t\t\ttasks belong to that job by filtering the results of a ListTasks call\n\t\t\twith the startedBy
value. Up to 36 letters (uppercase and lowercase),\n\t\t\tnumbers, hyphens (-), and underscores (_) are allowed.
If a task is started by an Amazon ECS service, then the startedBy
parameter\n\t\t\tcontains the deployment ID of the service that starts it.
The metadata that you apply to the task to help you categorize and organize them. Each\n\t\t\ttag consists of a key and an optional value, both of which you define.
\nThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The metadata that you apply to the task to help you categorize and organize them. Each\n\t\t\ttag consists of a key and an optional value, both of which you define.
\n\t\tThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The family
and revision
(family:revision
) or\n\t\t\tfull ARN of the task definition to run. If a revision
isn't specified,\n\t\t\tthe latest ACTIVE
revision is used.
When you create an IAM policy for run-task, you can set the resource to be the latest\n\t\t\ttask definition revision, or a specific revision.
\nThe full ARN value must match the value that you specified as the\n\t\t\t\tResource
of the IAM principal's permissions policy.
When you specify the policy resource as the latest task definition version (by setting\n\t\t\tthe Resource
in the policy to\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName
),\n\t\t\tthen set this value to\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName
.
When you specify the policy resource as a specific task definition version (by setting\n\t\t\tthe Resource
in the policy to\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:1
or\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:*
),\n\t\t\tthen set this value to\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:1
.
For more information, see Policy Resources for Amazon ECS in the Amazon Elastic Container Service developer Guide.
", + "smithy.api#documentation": "The family
and revision
(family:revision
) or\n\t\t\tfull ARN of the task definition to run. If a revision
isn't specified,\n\t\t\tthe latest ACTIVE
revision is used.
When you create an IAM policy for run-task, you can set the resource to be the latest\n\t\t\ttask definition revision, or a specific revision.
\n\t\tThe full ARN value must match the value that you specified as the\n\t\t\t\tResource
of the IAM principal's permissions policy.
When you specify the policy resource as the latest task definition version (by setting\n\t\t\tthe Resource
in the policy to\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName
),\n\t\t\tthen set this value to\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName
.
When you specify the policy resource as a specific task definition version (by setting\n\t\t\tthe Resource
in the policy to\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:1
or\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:*
),\n\t\t\tthen set this value to\n\t\t\t\tarn:aws:ecs:us-east-1:111122223333:task-definition/TaskFamilyName:1
.
For more information, see Policy Resources for Amazon ECS in the Amazon Elastic Container Service developer Guide.
", "smithy.api#required": {} } } @@ -9404,7 +9358,7 @@ "cpuArchitecture": { "target": "com.amazonaws.ecs#CPUArchitecture", "traits": { - "smithy.api#documentation": "The CPU architecture.
\nYou can run your Linux tasks on an ARM-based platform by setting the value to\n\t\t\t\tARM64
. This option is available for tasks that run on Linux Amazon EC2\n\t\t\tinstance or Linux containers on Fargate.
The CPU architecture.
\n\t\tYou can run your Linux tasks on an ARM-based platform by setting the value to\n\t\t\t\tARM64
. This option is available for tasks that run on Linux Amazon EC2\n\t\t\tinstance or Linux containers on Fargate.
Information about the platform for the Amazon ECS service or task.
\nFor more information about RuntimePlatform
, see RuntimePlatform in the Amazon Elastic Container Service Developer Guide.
Information about the platform for the Amazon ECS service or task.
\n\t\tFor more information about RuntimePlatform
, see RuntimePlatform in the Amazon Elastic Container Service Developer Guide.
The secret to expose to the container. The supported values are either the full ARN\n\t\t\tof the Secrets Manager secret or the full ARN of the parameter in the SSM Parameter\n\t\t\tStore.
\nFor information about the require Identity and Access Management permissions, see Required IAM permissions for Amazon ECS secrets (for Secrets Manager) or Required IAM permissions for Amazon ECS secrets (for Systems Manager Parameter\n\t\t\tstore) in the Amazon Elastic Container Service Developer Guide.
\nIf the SSM Parameter Store parameter exists in the same Region as the task\n\t\t\t\tyou're launching, then you can use either the full ARN or name of the parameter.\n\t\t\t\tIf the parameter exists in a different Region, then the full ARN must be\n\t\t\t\tspecified.
\nThe secret to expose to the container. The supported values are either the full ARN\n\t\t\tof the Secrets Manager secret or the full ARN of the parameter in the SSM Parameter\n\t\t\tStore.
\n\t\tFor information about the require Identity and Access Management permissions, see Required IAM permissions for Amazon ECS secrets (for Secrets Manager) or Required IAM permissions for Amazon ECS secrets (for Systems Manager Parameter\n\t\t\tstore) in the Amazon Elastic Container Service Developer Guide.
\n\t\tIf the SSM Parameter Store parameter exists in the same Region as the task\n\t\t\t\tyou're launching, then you can use either the full ARN or name of the parameter.\n\t\t\t\tIf the parameter exists in a different Region, then the full ARN must be\n\t\t\t\tspecified.
\n\t\tAn object representing the secret to expose to your container. Secrets can be exposed\n\t\t\tto a container in the following ways:
\nTo inject sensitive data into your containers as environment variables, use\n\t\t\t\t\tthe secrets
container definition parameter.
To reference sensitive information in the log configuration of a container,\n\t\t\t\t\tuse the secretOptions
container definition parameter.
For more information, see Specifying\n\t\t\t\tsensitive data in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "An object representing the secret to expose to your container. Secrets can be exposed\n\t\t\tto a container in the following ways:
\n\t\tTo inject sensitive data into your containers as environment variables, use\n\t\t\t\t\tthe secrets
container definition parameter.
To reference sensitive information in the log configuration of a container,\n\t\t\t\t\tuse the secretOptions
container definition parameter.
For more information, see Specifying\n\t\t\t\tsensitive data in the Amazon Elastic Container Service Developer Guide.
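A sketch of referencing a Secrets Manager secret from a container definition via the generated Kotlin client; the ARN and names are placeholders, and the same Secret shape is used for secretOptions in a log configuration.

```kotlin
import aws.sdk.kotlin.services.ecs.EcsClient
import aws.sdk.kotlin.services.ecs.model.*

// Sketch only: inject a Secrets Manager value as the DB_PASSWORD environment
// variable. secretOptions on a logConfiguration uses the same Secret shape.
suspend fun registerTaskWithSecrets(ecs: EcsClient) {
    ecs.registerTaskDefinition {
        family = "demo-task-with-secrets"
        containerDefinitions = listOf(
            ContainerDefinition {
                name = "app"
                image = "public.ecr.aws/docker/library/nginx:latest"
                secrets = listOf(
                    Secret {
                        name = "DB_PASSWORD"
                        valueFrom = "arn:aws:secretsmanager:us-east-1:111122223333:secret:demo-db-password"
                    }
                )
            }
        )
    }
}
```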
" } }, "com.amazonaws.ecs#SecretList": { @@ -9611,7 +9565,7 @@ "platformFamily": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The operating system that your tasks in the service run on. A platform family is\n\t\t\tspecified only for tasks using the Fargate launch type.
\n All tasks that run as part of this service must use the same\n\t\t\t\tplatformFamily
value as the service (for example,\n\t\t\tLINUX
).
The operating system that your tasks in the service run on. A platform family is\n\t\t\tspecified only for tasks using the Fargate launch type.
\n\t\t All tasks that run as part of this service must use the same\n\t\t\t\tplatformFamily
value as the service (for example,\n\t\t\tLINUX
).
The scheduling strategy to use for the service. For more information, see Services.
\nThere are two service scheduler strategies available.
\n\n REPLICA
-The replica scheduling strategy places and\n\t\t\t\t\tmaintains the desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement\n\t\t\t\t\tdecisions.
\n DAEMON
-The daemon scheduling strategy deploys exactly one\n\t\t\t\t\ttask on each active container instance. This task meets all of the task\n\t\t\t\t\tplacement constraints that you specify in your cluster. The service scheduler\n\t\t\t\t\talso evaluates the task placement constraints for running tasks. It stops tasks\n\t\t\t\t\tthat don't meet the placement constraints.
Fargate tasks don't support the DAEMON
\n\t\t\t\t\t\tscheduling strategy.
The scheduling strategy to use for the service. For more information, see Services.
\n\t\tThere are two service scheduler strategies available.
\n\t\t\n REPLICA
-The replica scheduling strategy places and\n\t\t\t\t\tmaintains the desired number of tasks across your cluster. By default, the\n\t\t\t\t\tservice scheduler spreads tasks across Availability Zones. You can use task\n\t\t\t\t\tplacement strategies and constraints to customize task placement\n\t\t\t\t\tdecisions.
\n DAEMON
-The daemon scheduling strategy deploys exactly one\n\t\t\t\t\ttask on each active container instance. This task meets all of the task\n\t\t\t\t\tplacement constraints that you specify in your cluster. The service scheduler\n\t\t\t\t\talso evaluates the task placement constraints for running tasks. It stops tasks\n\t\t\t\t\tthat don't meet the placement constraints.
Fargate tasks don't support the DAEMON
\n\t\t\t\t\t\tscheduling strategy.
The metadata that you apply to the service to help you categorize and organize them.\n\t\t\tEach tag consists of a key and an optional value. You define bot the key and\n\t\t\tvalue.
\nThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The metadata that you apply to the service to help you categorize and organize them.\n\t\t\tEach tag consists of a key and an optional value. You define bot the key and\n\t\t\tvalue.
\n\t\tThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The listening port number for the Service Connect proxy. This port is available\n\t\t\tinside of all of the tasks within the same namespace.
\nTo avoid changing your applications in client Amazon ECS services, set this to the same\n\t\t\tport that the client application uses by default. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
", + "smithy.api#documentation": "The listening port number for the Service Connect proxy. This port is available\n\t\t\tinside of all of the tasks within the same namespace.
\n\t\tTo avoid changing your applications in client Amazon ECS services, set this to the same\n\t\t\tport that the client application uses by default. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
", "smithy.api#required": {} } }, "dnsName": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The dnsName
is the name that you use in the applications of client tasks\n\t\t\tto connect to this service. The name must be a valid DNS name but doesn't need to be\n\t\t\tfully-qualified. The name can include up to 127 characters. The name can include\n\t\t\tlowercase letters, numbers, underscores (_), hyphens (-), and periods (.). The name\n\t\t\tcan't start with a hyphen.
If this parameter isn't specified, the default value of discoveryName.namespace
is used. If the discoveryName
isn't specified, the port mapping name from the task definition is used in portName.namespace
.
To avoid changing your applications in client Amazon ECS services, set this to the same\n\t\t\tname that the client application uses by default. For example, a few common names are\n\t\t\t\tdatabase
, db
, or the lowercase name of a database, such as\n\t\t\t\tmysql
or redis
. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
The dnsName
is the name that you use in the applications of client tasks\n\t\t\tto connect to this service. The name must be a valid DNS name but doesn't need to be\n\t\t\tfully-qualified. The name can include up to 127 characters. The name can include\n\t\t\tlowercase letters, numbers, underscores (_), hyphens (-), and periods (.). The name\n\t\t\tcan't start with a hyphen.
If this parameter isn't specified, the default value of discoveryName.namespace
is used. If the discoveryName
isn't specified, the port mapping name from the task definition is used in portName.namespace
.
To avoid changing your applications in client Amazon ECS services, set this to the same\n\t\t\tname that the client application uses by default. For example, a few common names are\n\t\t\t\tdatabase
, db
, or the lowercase name of a database, such as\n\t\t\t\tmysql
or redis
. For more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
Each alias (\"endpoint\") is a fully-qualified name and port number that other tasks\n\t\t\t(\"clients\") can use to connect to this service.
\nEach name and port mapping must be unique within the namespace.
\nTasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "Each alias (\"endpoint\") is a fully-qualified name and port number that other tasks\n\t\t\t(\"clients\") can use to connect to this service.
\n\t\tEach name and port mapping must be unique within the namespace.
\n\t\tTasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#ServiceConnectClientAliasList": { @@ -9776,7 +9730,7 @@ "services": { "target": "com.amazonaws.ecs#ServiceConnectServiceList", "traits": { - "smithy.api#documentation": "The list of Service Connect service objects. These are names and aliases (also known\n\t\t\tas endpoints) that are used by other Amazon ECS services to connect to this service.\n\t\t\t
\nThis field is not required for a \"client\" Amazon ECS service that's a member of a namespace\n\t\t\tonly to connect to other services within the namespace. An example of this would be a\n\t\t\tfrontend application that accepts incoming requests from either a load balancer that's\n\t\t\tattached to the service or by other means.
\nAn object selects a port from the task definition, assigns a name for the Cloud Map\n\t\t\tservice, and a list of aliases (endpoints) and ports for client applications to refer to\n\t\t\tthis service.
" + "smithy.api#documentation": "The list of Service Connect service objects. These are names and aliases (also known\n\t\t\tas endpoints) that are used by other Amazon ECS services to connect to this service.\n\t\t\t
\n\t\tThis field is not required for a \"client\" Amazon ECS service that's a member of a namespace\n\t\t\tonly to connect to other services within the namespace. An example of this would be a\n\t\t\tfrontend application that accepts incoming requests from either a load balancer that's\n\t\t\tattached to the service or by other means.
\n\t\tAn object selects a port from the task definition, assigns a name for the Cloud Map\n\t\t\tservice, and a list of aliases (endpoints) and ports for client applications to refer to\n\t\t\tthis service.
" } }, "logConfiguration": { @@ -9784,7 +9738,7 @@ } }, "traits": { - "smithy.api#documentation": "The Service Connect configuration of your Amazon ECS service. The configuration for this\n\t\t\tservice to discover and connect to services, and be discovered by, and connected from,\n\t\t\tother services within a namespace.
\nTasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "The Service Connect configuration of your Amazon ECS service. The configuration for this\n\t\t\tservice to discover and connect to services, and be discovered by, and connected from,\n\t\t\tother services within a namespace.
\n\t\tTasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#ServiceConnectService": { @@ -9800,19 +9754,19 @@ "discoveryName": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The discoveryName
is the name of the new Cloud Map service that Amazon ECS creates\n\t\t\tfor this Amazon ECS service. This must be unique within the Cloud Map namespace. The name can contain up to 64 characters. The name can include lowercase letters,\n\t\t\tnumbers, underscores (_), and hyphens (-). The name can't start with a hyphen.
If this parameter isn't specified, the default value of discoveryName.namespace
is used. If the discoveryName
isn't specified, the port mapping name from the task definition is used in portName.namespace
.
The discoveryName
is the name of the new Cloud Map service that Amazon ECS creates\n\t\t\tfor this Amazon ECS service. This must be unique within the Cloud Map namespace. The name can contain up to 64 characters. The name can include lowercase letters,\n\t\t\tnumbers, underscores (_), and hyphens (-). The name can't start with a hyphen.
If this parameter isn't specified, the default value of discoveryName.namespace
is used. If the discoveryName
isn't specified, the port mapping name from the task definition is used in portName.namespace
.
The list of client aliases for this Service Connect service. You use these to assign\n\t\t\tnames that can be used by client applications. The maximum number of client aliases that\n\t\t\tyou can have in this list is 1.
\nEach alias (\"endpoint\") is a fully-qualified name and port number that other Amazon ECS\n\t\t\ttasks (\"clients\") can use to connect to this service.
\nEach name and port mapping must be unique within the namespace.
\nFor each ServiceConnectService
, you must provide at least one\n\t\t\t\tclientAlias
with one port
.
The list of client aliases for this Service Connect service. You use these to assign\n\t\t\tnames that can be used by client applications. The maximum number of client aliases that\n\t\t\tyou can have in this list is 1.
\n\t\tEach alias (\"endpoint\") is a fully-qualified name and port number that other Amazon ECS\n\t\t\ttasks (\"clients\") can use to connect to this service.
\n\t\tEach name and port mapping must be unique within the namespace.
\n\t\tFor each ServiceConnectService
, you must provide at least one\n\t\t\t\tclientAlias
with one port
.
The port number for the Service Connect proxy to listen on.
\nUse the value of this field to bypass the proxy for traffic on the port number\n\t\t\tspecified in the named portMapping
in the task definition of this\n\t\t\tapplication, and then use it in your VPC security groups to allow traffic into the proxy\n\t\t\tfor this Amazon ECS service.
In awsvpc
mode and Fargate, the default value is the container port\n\t\t\tnumber. The container port number is in the portMapping
in the task\n\t\t\tdefinition. In bridge mode, the default value is the ephemeral port of the\n\t\t\tService Connect proxy.
The port number for the Service Connect proxy to listen on.
\n\t\tUse the value of this field to bypass the proxy for traffic on the port number\n\t\t\tspecified in the named portMapping
in the task definition of this\n\t\t\tapplication, and then use it in your VPC security groups to allow traffic into the proxy\n\t\t\tfor this Amazon ECS service.
In awsvpc
mode and Fargate, the default value is the container port\n\t\t\tnumber. The container port number is in the portMapping
in the task\n\t\t\tdefinition. In bridge mode, the default value is the ephemeral port of the\n\t\t\tService Connect proxy.
The discovery name of this Service Connect resource.
\nThe discoveryName
is the name of the new Cloud Map service that Amazon ECS creates\n\t\t\tfor this Amazon ECS service. This must be unique within the Cloud Map namespace. The name can contain up to 64 characters. The name can include lowercase letters,\n\t\t\tnumbers, underscores (_), and hyphens (-). The name can't start with a hyphen.
If this parameter isn't specified, the default value of discoveryName.namespace
is used. If the discoveryName
isn't specified, the port mapping name from the task definition is used in portName.namespace
.
The discovery name of this Service Connect resource.
\n\t\tThe discoveryName
is the name of the new Cloud Map service that Amazon ECS creates\n\t\t\tfor this Amazon ECS service. This must be unique within the Cloud Map namespace. The name can contain up to 64 characters. The name can include lowercase letters,\n\t\t\tnumbers, underscores (_), and hyphens (-). The name can't start with a hyphen.
If this parameter isn't specified, the default value of discoveryName.namespace
is used. If the discoveryName
isn't specified, the port mapping name from the task definition is used in portName.namespace
.
The Service Connect resource. Each configuration maps a discovery name to a\n\t\t\tCloud Map service name. The data is stored in Cloud Map as part of the\n\t\t\tService Connect configuration for each discovery name of this Amazon ECS service.
\nA task can resolve the dnsName
for each of the clientAliases
\n\t\t\tof a service. However a task can't resolve the discovery names. If you want to connect\n\t\t\tto a service, refer to the ServiceConnectConfiguration
of that service for\n\t\t\tthe list of clientAliases
that you can use.
The Service Connect resource. Each configuration maps a discovery name to a\n\t\t\tCloud Map service name. The data is stored in Cloud Map as part of the\n\t\t\tService Connect configuration for each discovery name of this Amazon ECS service.
\n\t\tA task can resolve the dnsName
for each of the clientAliases
\n\t\t\tof a service. However a task can't resolve the discovery names. If you want to connect\n\t\t\tto a service, refer to the ServiceConnectConfiguration
of that service for\n\t\t\tthe list of clientAliases
that you can use.
The details for the service registry.
\nEach service may be associated with one service registry. Multiple service registries\n\t\t\tfor each service are not supported.
\nWhen you add, update, or remove the service registries configuration, Amazon ECS starts a\n\t\t\tnew deployment. New tasks are registered and deregistered to the updated service\n\t\t\tregistry configuration.
" + "smithy.api#documentation": "The details for the service registry.
\n\t\tEach service may be associated with one service registry. Multiple service registries\n\t\t\tfor each service are not supported.
\n\t\tWhen you add, update, or remove the service registries configuration, Amazon ECS starts a\n\t\t\tnew deployment. New tasks are registered and deregistered to the updated service\n\t\t\tregistry configuration.
" } }, "com.amazonaws.ecs#Services": { @@ -10119,7 +10073,7 @@ } ], "traits": { - "smithy.api#documentation": "Starts a new task from the specified task definition on the specified container\n\t\t\tinstance or instances.
\nAlternatively, you can use RunTask to place tasks for you. For more\n\t\t\tinformation, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "Starts a new task from the specified task definition on the specified container\n\t\t\tinstance or instances.
\n\t\tAlternatively, you can use RunTask to place tasks for you. For more\n\t\t\tinformation, see Scheduling Tasks in the Amazon Elastic Container Service Developer Guide.
" } }, "com.amazonaws.ecs#StartTaskRequest": { @@ -10167,7 +10121,7 @@ "overrides": { "target": "com.amazonaws.ecs#TaskOverride", "traits": { - "smithy.api#documentation": "A list of container overrides in JSON format that specify the name of a container in\n\t\t\tthe specified task definition and the overrides it receives. You can override the\n\t\t\tdefault command for a container (that's specified in the task definition or Docker\n\t\t\timage) with a command
override. You can also override existing environment\n\t\t\tvariables (that are specified in the task definition or Docker image) on a container or\n\t\t\tadd new environment variables to it with an environment
override.
A total of 8192 characters are allowed for overrides. This limit includes the JSON\n\t\t\t\tformatting characters of the override structure.
\nA list of container overrides in JSON format that specify the name of a container in\n\t\t\tthe specified task definition and the overrides it receives. You can override the\n\t\t\tdefault command for a container (that's specified in the task definition or Docker\n\t\t\timage) with a command
override. You can also override existing environment\n\t\t\tvariables (that are specified in the task definition or Docker image) on a container or\n\t\t\tadd new environment variables to it with an environment
override.
A total of 8192 characters are allowed for overrides. This limit includes the JSON\n\t\t\t\tformatting characters of the override structure.
\n\t\tAn optional tag specified when a task is started. For example, if you automatically\n\t\t\ttrigger a task to run a batch process job, you could apply a unique identifier for that\n\t\t\tjob to your task with the startedBy
parameter. You can then identify which\n\t\t\ttasks belong to that job by filtering the results of a ListTasks call\n\t\t\twith the startedBy
value. Up to 36 letters (uppercase and lowercase),\n\t\t\tnumbers, hyphens (-), and underscores (_) are allowed.
If a task is started by an Amazon ECS service, the startedBy
parameter\n\t\t\tcontains the deployment ID of the service that starts it.
An optional tag specified when a task is started. For example, if you automatically\n\t\t\ttrigger a task to run a batch process job, you could apply a unique identifier for that\n\t\t\tjob to your task with the startedBy
parameter. You can then identify which\n\t\t\ttasks belong to that job by filtering the results of a ListTasks call\n\t\t\twith the startedBy
value. Up to 36 letters (uppercase and lowercase),\n\t\t\tnumbers, hyphens (-), and underscores (_) are allowed.
If a task is started by an Amazon ECS service, the startedBy
parameter\n\t\t\tcontains the deployment ID of the service that starts it.
The metadata that you apply to the task to help you categorize and organize them. Each\n\t\t\ttag consists of a key and an optional value, both of which you define.
\nThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The metadata that you apply to the task to help you categorize and organize them. Each\n\t\t\ttag consists of a key and an optional value, both of which you define.
\n\t\tThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
Stops a running task. Any tags associated with the task will be deleted.
\nWhen StopTask is called on a task, the equivalent of docker\n\t\t\t\tstop
is issued to the containers running in the task. This results in a\n\t\t\t\tSIGTERM
value and a default 30-second timeout, after which the\n\t\t\t\tSIGKILL
value is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM
value gracefully and exits within 30 seconds\n\t\t\tfrom receiving it, no SIGKILL
value is sent.
The default 30-second timeout can be configured on the Amazon ECS container agent with\n\t\t\t\tthe ECS_CONTAINER_STOP_TIMEOUT
variable. For more information, see\n\t\t\t\t\tAmazon ECS Container Agent Configuration in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
Stops a running task. Any tags associated with the task will be deleted.
\n\t\tWhen StopTask is called on a task, the equivalent of docker\n\t\t\t\tstop
is issued to the containers running in the task. This results in a\n\t\t\t\tSIGTERM
value and a default 30-second timeout, after which the\n\t\t\t\tSIGKILL
value is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM
value gracefully and exits within 30 seconds\n\t\t\tfrom receiving it, no SIGKILL
value is sent.
The default 30-second timeout can be configured on the Amazon ECS container agent with\n\t\t\t\tthe ECS_CONTAINER_STOP_TIMEOUT
variable. For more information, see\n\t\t\t\t\tAmazon ECS Container Agent Configuration in the\n\t\t\t\tAmazon Elastic Container Service Developer Guide.
This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
\nSent to acknowledge that an attachment changed states.
" + "smithy.api#documentation": "This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
\nSent to acknowledge that an attachment changed states.
" } }, "com.amazonaws.ecs#SubmitAttachmentStateChangesRequest": { @@ -10380,7 +10334,7 @@ } ], "traits": { - "smithy.api#documentation": "This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
\nSent to acknowledge that a container changed states.
" + "smithy.api#documentation": "This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
\nSent to acknowledge that a container changed states.
" } }, "com.amazonaws.ecs#SubmitContainerStateChangeRequest": { @@ -10470,7 +10424,7 @@ } ], "traits": { - "smithy.api#documentation": "This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
\nSent to acknowledge that a task changed states.
" + "smithy.api#documentation": "This action is only used by the Amazon ECS agent, and it is not intended for use outside of the agent.
\nSent to acknowledge that a task changed states.
" } }, "com.amazonaws.ecs#SubmitTaskStateChangeRequest": { @@ -10566,7 +10520,7 @@ } }, "traits": { - "smithy.api#documentation": "A list of namespaced kernel parameters to set in the container. This parameter maps to\n\t\t\t\tSysctls
in the Create a container section of the\n\t\t\tDocker Remote API and the --sysctl
option to docker run.
We don't recommend that you specify network-related systemControls
\n\t\t\tparameters for multiple containers in a single task. This task also uses either the\n\t\t\t\tawsvpc
or host
network mode. It does it for the following\n\t\t\treasons.
For tasks that use the awsvpc
network mode, if you set\n\t\t\t\t\t\tsystemControls
for any container, it applies to all containers\n\t\t\t\t\tin the task. If you set different systemControls
for multiple\n\t\t\t\t\tcontainers in a single task, the container that's started last determines which\n\t\t\t\t\t\tsystemControls
take effect.
For tasks that use the host
network mode, the\n\t\t\t\t\t\tsystemControls
parameter applies to the container instance's\n\t\t\t\t\tkernel parameter and that of all containers of any tasks running on that\n\t\t\t\t\tcontainer instance.
A list of namespaced kernel parameters to set in the container. This parameter maps to\n\t\t\t\tSysctls
in the Create a container section of the\n\t\t\tDocker Remote API and the --sysctl
option to docker run.
We don't recommend that you specify network-related systemControls
\n\t\t\tparameters for multiple containers in a single task. This task also uses either the\n\t\t\t\tawsvpc
or host
network mode. It does it for the following\n\t\t\treasons.
For tasks that use the awsvpc
network mode, if you set\n\t\t\t\t\t\tsystemControls
for any container, it applies to all containers\n\t\t\t\t\tin the task. If you set different systemControls
for multiple\n\t\t\t\t\tcontainers in a single task, the container that's started last determines which\n\t\t\t\t\t\tsystemControls
take effect.
For tasks that use the host
network mode, the\n\t\t\t\t\t\tsystemControls
parameter applies to the container instance's\n\t\t\t\t\tkernel parameter and that of all containers of any tasks running on that\n\t\t\t\t\tcontainer instance.
The metadata that you apply to a resource to help you categorize and organize them.\n\t\t\tEach tag consists of a key and an optional value. You define them.
\nThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The metadata that you apply to a resource to help you categorize and organize them.\n\t\t\tEach tag consists of a key and an optional value. You define them.
\n\t\tThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The tags to add to the resource. A tag is an array of key-value pairs.
\nThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The tags to add to the resource. A tag is an array of key-value pairs.
\n\t\tThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The execute command cannot run. This error can be caused by any of the following\n\t\t\tconfiguration issues:
\nIncorrect IAM permissions
\nThe SSM agent is not installed or is not running
\nThere is an interface Amazon VPC endpoint for Amazon ECS, but there is not one for\n\t\t\t\t\tfor Systems Manager Session Manager
\nFor information about how to troubleshoot the issues, see Troubleshooting issues with ECS\n\t\t\t\tExec in the Amazon Elastic Container Service Developer Guide.
", + "smithy.api#documentation": "The execute command cannot run. This error can be caused by any of the following\n\t\t\tconfiguration issues:
\n\t\tIncorrect IAM permissions
\n\t\t\tThe SSM agent is not installed or is not running
\n\t\t\tThere is an interface Amazon VPC endpoint for Amazon ECS, but there is not one for\n\t\t\t\t\tSystems Manager Session Manager
\n\t\t\tFor information about how to troubleshoot the issues, see Troubleshooting issues with ECS\n\t\t\t\tExec in the Amazon Elastic Container Service Developer Guide.
", "smithy.api#error": "client" } }, @@ -10780,7 +10734,7 @@ "cpu": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The number of CPU units used by the task as expressed in a task definition. It can be\n\t\t\texpressed as an integer using CPU units (for example, 1024
). It can also be\n\t\t\texpressed as a string using vCPUs (for example, 1 vCPU
or 1\n\t\t\t\tvcpu
). String values are converted to an integer that indicates the CPU units\n\t\t\twhen the task definition is registered.
If you use the EC2 launch type, this field is optional. Supported values\n\t\t\tare between 128
CPU units (0.125
vCPUs) and 10240
\n\t\t\tCPU units (10
vCPUs).
If you use the Fargate launch type, this field is required. You must use\n\t\t\tone of the following values. These values determine the range of supported values for\n\t\t\tthe memory
parameter:
The CPU units cannot be less than 1 vCPU when you use Windows containers on\n\t\t\tFargate.
\n256 (.25 vCPU) - Available memory
values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)
512 (.5 vCPU) - Available memory
values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)
1024 (1 vCPU) - Available memory
values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)
2048 (2 vCPU) - Available memory
values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)
4096 (4 vCPU) - Available memory
values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)
8192 (8 vCPU) - Available memory
values: 16 GB and 60 GB in 4 GB increments
This option requires Linux platform 1.4.0
or\n later.
16384 (16vCPU) - Available memory
values: 32GB and 120 GB in 8 GB increments
This option requires Linux platform 1.4.0
or\n later.
The number of CPU units used by the task as expressed in a task definition. It can be\n\t\t\texpressed as an integer using CPU units (for example, 1024
). It can also be\n\t\t\texpressed as a string using vCPUs (for example, 1 vCPU
or 1\n\t\t\t\tvcpu
). String values are converted to an integer that indicates the CPU units\n\t\t\twhen the task definition is registered.
If you use the EC2 launch type, this field is optional. Supported values\n\t\t\tare between 128
CPU units (0.125
vCPUs) and 10240
\n\t\t\tCPU units (10
vCPUs).
If you use the Fargate launch type, this field is required. You must use\n\t\t\tone of the following values. These values determine the range of supported values for\n\t\t\tthe memory
parameter:
The CPU units cannot be less than 1 vCPU when you use Windows containers on\n\t\t\tFargate.
\n\t\t256 (.25 vCPU) - Available memory
values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)
512 (.5 vCPU) - Available memory
values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)
1024 (1 vCPU) - Available memory
values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)
2048 (2 vCPU) - Available memory
values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)
4096 (4 vCPU) - Available memory
values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)
8192 (8 vCPU) - Available memory
values: 16 GB and 60 GB in 4 GB increments
This option requires Linux platform 1.4.0
or\n later.
16384 (16vCPU) - Available memory
values: 32GB and 120 GB in 8 GB increments
This option requires Linux platform 1.4.0
or\n later.
The health status for the task. It's determined by the health of the essential\n\t\t\tcontainers in the task. If all essential containers in the task are reporting as\n\t\t\t\tHEALTHY
, the task status also reports as HEALTHY
. If any\n\t\t\tessential containers in the task are reporting as UNHEALTHY
or\n\t\t\t\tUNKNOWN
, the task status also reports as UNHEALTHY
or\n\t\t\t\tUNKNOWN
.
The Amazon ECS container agent doesn't monitor or report on Docker health checks that\n\t\t\t\tare embedded in a container image and not specified in the container definition. For\n\t\t\t\texample, this includes those specified in a parent image or from the image's\n\t\t\t\tDockerfile. Health check parameters that are specified in a container definition\n\t\t\t\toverride any Docker health checks that are found in the container image.
\nThe health status for the task. It's determined by the health of the essential\n\t\t\tcontainers in the task. If all essential containers in the task are reporting as\n\t\t\t\tHEALTHY
, the task status also reports as HEALTHY
. If any\n\t\t\tessential containers in the task are reporting as UNHEALTHY
or\n\t\t\t\tUNKNOWN
, the task status also reports as UNHEALTHY
or\n\t\t\t\tUNKNOWN
.
The Amazon ECS container agent doesn't monitor or report on Docker health checks that\n\t\t\t\tare embedded in a container image and not specified in the container definition. For\n\t\t\t\texample, this includes those specified in a parent image or from the image's\n\t\t\t\tDockerfile. Health check parameters that are specified in a container definition\n\t\t\t\toverride any Docker health checks that are found in the container image.
\n\t\tThe amount of memory (in MiB) that the task uses as expressed in a task definition. It\n\t\t\tcan be expressed as an integer using MiB (for example, 1024
). If it's\n\t\t\texpressed as a string using GB (for example, 1GB
or 1 GB
),\n\t\t\tit's converted to an integer indicating the MiB when the task definition is\n\t\t\tregistered.
If you use the EC2 launch type, this field is optional.
\nIf you use the Fargate launch type, this field is required. You must use\n\t\t\tone of the following values. The value that you choose determines the range of supported\n\t\t\tvalues for the cpu
parameter.
512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu
values: 256 (.25 vCPU)
1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu
values: 512 (.5 vCPU)
2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu
values: 1024 (1 vCPU)
Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu
values: 2048 (2 vCPU)
Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu
values: 4096 (4 vCPU)
Between 16 GB and 60 GB in 4 GB increments - Available cpu
values: 8192 (8 vCPU)
This option requires Linux platform 1.4.0
or\n later.
Between 32GB and 120 GB in 8 GB increments - Available cpu
values: 16384 (16 vCPU)
This option requires Linux platform 1.4.0
or\n later.
The amount of memory (in MiB) that the task uses as expressed in a task definition. It\n\t\t\tcan be expressed as an integer using MiB (for example, 1024
). If it's\n\t\t\texpressed as a string using GB (for example, 1GB
or 1 GB
),\n\t\t\tit's converted to an integer indicating the MiB when the task definition is\n\t\t\tregistered.
If you use the EC2 launch type, this field is optional.
\n\t\tIf you use the Fargate launch type, this field is required. You must use\n\t\t\tone of the following values. The value that you choose determines the range of supported\n\t\t\tvalues for the cpu
parameter.
512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu
values: 256 (.25 vCPU)
1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu
values: 512 (.5 vCPU)
2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu
values: 1024 (1 vCPU)
Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu
values: 2048 (2 vCPU)
Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu
values: 4096 (4 vCPU)
Between 16 GB and 60 GB in 4 GB increments - Available cpu
values: 8192 (8 vCPU)
This option requires Linux platform 1.4.0
or\n later.
Between 32GB and 120 GB in 8 GB increments - Available cpu
values: 16384 (16 vCPU)
This option requires Linux platform 1.4.0
or\n later.
The operating system that your tasks are running on. A platform family is specified\n\t\t\tonly for tasks that use the Fargate launch type.
\n All tasks that run as part of this service must use the same\n\t\t\t\tplatformFamily
value as the service (for example,\n\t\t\tLINUX.
).
The operating system that your tasks are running on. A platform family is specified\n\t\t\tonly for tasks that use the Fargate launch type.
\n\t\t All tasks that run as part of this service must use the same\n\t\t\t\tplatformFamily
value as the service (for example,\n\t\t\tLINUX.
).
The stop code indicating why a task was stopped. The stoppedReason
might\n\t\t\tcontain additional details.
The following are valid values:
\n\n TaskFailedToStart
\n
\n EssentialContainerExited
\n
\n UserInitiated
\n
\n TerminationNotice
\n
\n ServiceSchedulerInitiated
\n
\n SpotInterruption
\n
The stop code indicating why a task was stopped. The stoppedReason
might\n\t\t\tcontain additional details.
The following are valid values:
\n\t\t\n TaskFailedToStart
\n
\n EssentialContainerExited
\n
\n UserInitiated
\n
\n TerminationNotice
\n
\n ServiceSchedulerInitiated
\n
\n SpotInterruption
\n
The metadata that you apply to the task to help you categorize and organize the task.\n\t\t\tEach tag consists of a key and an optional value. You define both the key and\n\t\t\tvalue.
\nThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The metadata that you apply to the task to help you categorize and organize the task.\n\t\t\tEach tag consists of a key and an optional value. You define both the key and\n\t\t\tvalue.
\n\t\tThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The name of a family that this task definition is registered to. Up to 255 characters\n\t\t\tare allowed. Letters (both uppercase and lowercase letters), numbers, hyphens (-), and\n\t\t\tunderscores (_) are allowed.
\nA family groups multiple versions of a task definition. Amazon ECS gives the first task\n\t\t\tdefinition that you registered to a family a revision number of 1. Amazon ECS gives\n\t\t\tsequential revision numbers to each task definition that you add.
" + "smithy.api#documentation": "The name of a family that this task definition is registered to. Up to 255 characters\n\t\t\tare allowed. Letters (both uppercase and lowercase letters), numbers, hyphens (-), and\n\t\t\tunderscores (_) are allowed.
\n\t\tA family groups multiple versions of a task definition. Amazon ECS gives the first task\n\t\t\tdefinition that you registered to a family a revision number of 1. Amazon ECS gives\n\t\t\tsequential revision numbers to each task definition that you add.
" } }, "taskRoleArn": { "target": "com.amazonaws.ecs#String", "traits": { - "smithy.api#documentation": "The short name or full Amazon Resource Name (ARN) of the Identity and Access Management role that grants containers in the\n\t\t\ttask permission to call Amazon Web Services APIs on your behalf. For more information, see Amazon ECS\n\t\t\t\tTask Role in the Amazon Elastic Container Service Developer Guide.
\nIAM roles for tasks on Windows require that the -EnableTaskIAMRole
option\n\t\t\tis set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some\n\t\t\tconfiguration code to use the feature. For more information, see Windows IAM roles\n\t\t\t\tfor tasks in the Amazon Elastic Container Service Developer Guide.
The short name or full Amazon Resource Name (ARN) of the Identity and Access Management role that grants containers in the\n\t\t\ttask permission to call Amazon Web Services APIs on your behalf. For more information, see Amazon ECS\n\t\t\t\tTask Role in the Amazon Elastic Container Service Developer Guide.
\n\t\tIAM roles for tasks on Windows require that the -EnableTaskIAMRole
option\n\t\t\tis set when you launch the Amazon ECS-optimized Windows AMI. Your containers must also run some\n\t\t\tconfiguration code to use the feature. For more information, see Windows IAM roles\n\t\t\t\tfor tasks in the Amazon Elastic Container Service Developer Guide.
The Docker networking mode to use for the containers in the task. The valid values are\n none
, bridge
, awsvpc
, and host
.\n If no network mode is specified, the default is bridge
.
For Amazon ECS tasks on Fargate, the awsvpc
network mode is required. \n For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances,
or awsvpc
can be used. If the network\n mode is set to none
, you cannot specify port mappings in your container\n definitions, and the tasks containers do not have external connectivity. The\n host
and awsvpc
network modes offer the highest networking\n performance for containers because they use the EC2 network stack instead of the\n virtualized network stack provided by the bridge
mode.
With the host
and awsvpc
network modes, exposed container\n ports are mapped directly to the corresponding host port (for the host
\n network mode) or the attached elastic network interface port (for the\n awsvpc
network mode), so you cannot take advantage of dynamic host port\n mappings.
When using the host
network mode, you should not run\n containers using the root user (UID 0). It is considered best practice\n to use a non-root user.
If the network mode is awsvpc
, the task is allocated an elastic network\n interface, and you must specify a NetworkConfiguration value when you create\n a service or run a task with the task definition. For more information, see Task Networking in the\n Amazon Elastic Container Service Developer Guide.
If the network mode is host
, you cannot run multiple instantiations of the\n same task on a single container instance when port mappings are used.
For more information, see Network\n settings in the Docker run reference.
" + "smithy.api#documentation": "The Docker networking mode to use for the containers in the task. The valid values are\n none
, bridge
, awsvpc
, and host
.\n If no network mode is specified, the default is bridge
.
For Amazon ECS tasks on Fargate, the awsvpc
network mode is required. \n For Amazon ECS tasks on Amazon EC2 Linux instances, any network mode can be used. For Amazon ECS tasks on Amazon EC2 Windows instances,
or awsvpc
can be used. If the network\n mode is set to none
, you cannot specify port mappings in your container\n definitions, and the tasks containers do not have external connectivity. The\n host
and awsvpc
network modes offer the highest networking\n performance for containers because they use the EC2 network stack instead of the\n virtualized network stack provided by the bridge
mode.
With the host
and awsvpc
network modes, exposed container\n ports are mapped directly to the corresponding host port (for the host
\n network mode) or the attached elastic network interface port (for the\n awsvpc
network mode), so you cannot take advantage of dynamic host port\n mappings.
When using the host
network mode, you should not run\n containers using the root user (UID 0). It is considered best practice\n to use a non-root user.
If the network mode is awsvpc
, the task is allocated an elastic network\n interface, and you must specify a NetworkConfiguration value when you create\n a service or run a task with the task definition. For more information, see Task Networking in the\n Amazon Elastic Container Service Developer Guide.
If the network mode is host
, you cannot run multiple instantiations of the\n same task on a single container instance when port mappings are used.
For more information, see Network\n settings in the Docker run reference.
" } }, "revision": { @@ -10995,7 +10949,7 @@ "volumes": { "target": "com.amazonaws.ecs#VolumeList", "traits": { - "smithy.api#documentation": "The list of data volume definitions for the task. For more information, see Using data volumes in tasks in the Amazon Elastic Container Service Developer Guide.
\nThe host
and sourcePath
parameters aren't supported for\n\t\t\t\ttasks run on Fargate.
The list of data volume definitions for the task. For more information, see Using data volumes in tasks in the Amazon Elastic Container Service Developer Guide.
\n\t\tThe host
and sourcePath
parameters aren't supported for\n\t\t\t\ttasks run on Fargate.
The container instance attributes required by your task. When an Amazon EC2 instance is\n\t\t\tregistered to your cluster, the Amazon ECS container agent assigns some standard attributes\n\t\t\tto the instance. You can apply custom attributes. These are specified as key-value pairs\n\t\t\tusing the Amazon ECS console or the PutAttributes API. These attributes are\n\t\t\tused when determining task placement for tasks hosted on Amazon EC2 instances. For more\n\t\t\tinformation, see Attributes in the Amazon Elastic Container Service Developer Guide.
\nThis parameter isn't supported for tasks run on Fargate.
\nThe container instance attributes required by your task. When an Amazon EC2 instance is\n\t\t\tregistered to your cluster, the Amazon ECS container agent assigns some standard attributes\n\t\t\tto the instance. You can apply custom attributes. These are specified as key-value pairs\n\t\t\tusing the Amazon ECS console or the PutAttributes API. These attributes are\n\t\t\tused when determining task placement for tasks hosted on Amazon EC2 instances. For more\n\t\t\tinformation, see Attributes in the Amazon Elastic Container Service Developer Guide.
\n\t\tThis parameter isn't supported for tasks run on Fargate.
\n\t\tAn array of placement constraint objects to use for tasks.
\nThis parameter isn't supported for tasks run on Fargate.
\nAn array of placement constraint objects to use for tasks.
\n\t\tThis parameter isn't supported for tasks run on Fargate.
\n\t\tThe operating system that your task definitions are running on. A platform family is\n\t\t\tspecified only for tasks using the Fargate launch type.
\nWhen you specify a task in a service, this value must match the\n\t\t\t\truntimePlatform
value of the service.
The operating system that your task definitions are running on. A platform family is\n\t\t\tspecified only for tasks using the Fargate launch type.
\n\t\tWhen you specify a task in a service, this value must match the\n\t\t\t\truntimePlatform
value of the service.
The number of cpu
units used by the task. If you use the EC2 launch type,\n\t\t\tthis field is optional. Any value can be used. If you use the Fargate launch type, this\n\t\t\tfield is required. You must use one of the following values. The value that you choose\n\t\t\tdetermines your range of valid values for the memory
parameter.
The CPU units cannot be less than 1 vCPU when you use Windows containers on\n\t\t\tFargate.
\n256 (.25 vCPU) - Available memory
values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)
512 (.5 vCPU) - Available memory
values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)
1024 (1 vCPU) - Available memory
values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)
2048 (2 vCPU) - Available memory
values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)
4096 (4 vCPU) - Available memory
values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)
8192 (8 vCPU) - Available memory
values: 16 GB and 60 GB in 4 GB increments
This option requires Linux platform 1.4.0
or\n later.
16384 (16vCPU) - Available memory
values: 32GB and 120 GB in 8 GB increments
This option requires Linux platform 1.4.0
or\n later.
The number of cpu
units used by the task. If you use the EC2 launch type,\n\t\t\tthis field is optional. Any value can be used. If you use the Fargate launch type, this\n\t\t\tfield is required. You must use one of the following values. The value that you choose\n\t\t\tdetermines your range of valid values for the memory
parameter.
The CPU units cannot be less than 1 vCPU when you use Windows containers on\n\t\t\tFargate.
\n\t\t256 (.25 vCPU) - Available memory
values: 512 (0.5 GB), 1024 (1 GB), 2048 (2 GB)
512 (.5 vCPU) - Available memory
values: 1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB)
1024 (1 vCPU) - Available memory
values: 2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB)
2048 (2 vCPU) - Available memory
values: 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB)
4096 (4 vCPU) - Available memory
values: 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB)
8192 (8 vCPU) - Available memory
values: 16 GB and 60 GB in 4 GB increments
This option requires Linux platform 1.4.0
or\n later.
16384 (16vCPU) - Available memory
values: 32GB and 120 GB in 8 GB increments
This option requires Linux platform 1.4.0
or\n later.
The amount (in MiB) of memory used by the task.
\nIf your tasks runs on Amazon EC2 instances, you must specify either a task-level memory\n\t\t\tvalue or a container-level memory value. This field is optional and any value can be\n\t\t\tused. If a task-level memory value is specified, the container-level memory value is\n\t\t\toptional. For more information regarding container-level memory and memory reservation,\n\t\t\tsee ContainerDefinition.
\nIf your tasks runs on Fargate, this field is required. You must use one of the\n\t\t\tfollowing values. The value you choose determines your range of valid values for the\n\t\t\t\tcpu
parameter.
512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu
values: 256 (.25 vCPU)
1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu
values: 512 (.5 vCPU)
2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu
values: 1024 (1 vCPU)
Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu
values: 2048 (2 vCPU)
Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu
values: 4096 (4 vCPU)
Between 16 GB and 60 GB in 4 GB increments - Available cpu
values: 8192 (8 vCPU)
This option requires Linux platform 1.4.0
or\n later.
Between 32GB and 120 GB in 8 GB increments - Available cpu
values: 16384 (16 vCPU)
This option requires Linux platform 1.4.0
or\n later.
The amount (in MiB) of memory used by the task.
\n\t\tIf your tasks run on Amazon EC2 instances, you must specify either a task-level memory\n\t\t\tvalue or a container-level memory value. This field is optional and any value can be\n\t\t\tused. If a task-level memory value is specified, the container-level memory value is\n\t\t\toptional. For more information regarding container-level memory and memory reservation,\n\t\t\tsee ContainerDefinition.
\n\t\tIf your tasks run on Fargate, this field is required. You must use one of the\n\t\t\tfollowing values. The value you choose determines your range of valid values for the\n\t\t\t\tcpu
parameter.
512 (0.5 GB), 1024 (1 GB), 2048 (2 GB) - Available cpu
values: 256 (.25 vCPU)
1024 (1 GB), 2048 (2 GB), 3072 (3 GB), 4096 (4 GB) - Available cpu
values: 512 (.5 vCPU)
2048 (2 GB), 3072 (3 GB), 4096 (4 GB), 5120 (5 GB), 6144 (6 GB), 7168 (7 GB), 8192 (8 GB) - Available cpu
values: 1024 (1 vCPU)
Between 4096 (4 GB) and 16384 (16 GB) in increments of 1024 (1 GB) - Available cpu
values: 2048 (2 vCPU)
Between 8192 (8 GB) and 30720 (30 GB) in increments of 1024 (1 GB) - Available cpu
values: 4096 (4 vCPU)
Between 16 GB and 60 GB in 4 GB increments - Available cpu
values: 8192 (8 vCPU)
This option requires Linux platform 1.4.0
or\n later.
Between 32GB and 120 GB in 8 GB increments - Available cpu
values: 16384 (16 vCPU)
This option requires Linux platform 1.4.0
or\n later.
The process namespace to use for the containers in the task. The valid\n values are host
or task
. If host
\n is specified, then all containers within the tasks that specified the\n host
PID mode on the same container instance share the\n same process namespace with the host Amazon EC2 instance. If task
is\n specified, all containers within the specified task share the same\n process namespace. If no value is specified, the default is a private\n namespace. For more information, see PID settings in the Docker run\n reference.
If the host
PID mode is used, be aware that there is a\n heightened risk of undesired process namespace expose. For more\n information, see Docker\n security.
This parameter is not supported for Windows containers or tasks run on Fargate.
\nThe process namespace to use for the containers in the task. The valid\n values are host
or task
. If host
\n is specified, then all containers within the tasks that specified the\n host
PID mode on the same container instance share the\n same process namespace with the host Amazon EC2 instance. If task
is\n specified, all containers within the specified task share the same\n process namespace. If no value is specified, the default is a private\n namespace. For more information, see PID settings in the Docker run\n reference.
If the host
PID mode is used, be aware that there is a\n heightened risk of undesired process namespace expose. For more\n information, see Docker\n security.
This parameter is not supported for Windows containers or tasks run on Fargate.
\nThe IPC resource namespace to use for the containers in the task. The valid values are\n host
, task
, or none
. If host
is\n specified, then all containers within the tasks that specified the host
IPC\n mode on the same container instance share the same IPC resources with the host Amazon EC2\n instance. If task
is specified, all containers within the specified task\n share the same IPC resources. If none
is specified, then IPC resources\n within the containers of a task are private and not shared with other containers in a\n task or on the container instance. If no value is specified, then the IPC resource\n namespace sharing depends on the Docker daemon setting on the container instance. For\n more information, see IPC\n settings in the Docker run reference.
If the host
IPC mode is used, be aware that there is a heightened risk of\n undesired IPC namespace expose. For more information, see Docker\n security.
If you are setting namespaced kernel parameters using systemControls
for\n the containers in the task, the following will apply to your IPC resource namespace. For\n more information, see System\n Controls in the Amazon Elastic Container Service Developer Guide.
For tasks that use the host
IPC mode, IPC namespace related\n systemControls
are not supported.
For tasks that use the task
IPC mode, IPC namespace related\n systemControls
will apply to all containers within a\n task.
This parameter is not supported for Windows containers or tasks run on Fargate.
\nThe IPC resource namespace to use for the containers in the task. The valid values are\n host
, task
, or none
. If host
is\n specified, then all containers within the tasks that specified the host
IPC\n mode on the same container instance share the same IPC resources with the host Amazon EC2\n instance. If task
is specified, all containers within the specified task\n share the same IPC resources. If none
is specified, then IPC resources\n within the containers of a task are private and not shared with other containers in a\n task or on the container instance. If no value is specified, then the IPC resource\n namespace sharing depends on the Docker daemon setting on the container instance. For\n more information, see IPC\n settings in the Docker run reference.
If the host
IPC mode is used, be aware that there is a heightened risk of\n undesired IPC namespace expose. For more information, see Docker\n security.
If you are setting namespaced kernel parameters using systemControls
for\n the containers in the task, the following will apply to your IPC resource namespace. For\n more information, see System\n Controls in the Amazon Elastic Container Service Developer Guide.
For tasks that use the host
IPC mode, IPC namespace related\n systemControls
are not supported.
For tasks that use the task
IPC mode, IPC namespace related\n systemControls
will apply to all containers within a\n task.
This parameter is not supported for Windows containers or tasks run on Fargate.
\nThe configuration details for the App Mesh proxy.
\nYour Amazon ECS container instances require at least version 1.26.0 of the container agent\n\t\t\tand at least version 1.26.0-1 of the ecs-init
package to use a proxy\n\t\t\tconfiguration. If your container instances are launched from the Amazon ECS optimized AMI\n\t\t\tversion 20190301
or later, they contain the required versions of the\n\t\t\tcontainer agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
The configuration details for the App Mesh proxy.
\n\t\tYour Amazon ECS container instances require at least version 1.26.0 of the container agent\n\t\t\tand at least version 1.26.0-1 of the ecs-init
package to use a proxy\n\t\t\tconfiguration. If your container instances are launched from the Amazon ECS optimized AMI\n\t\t\tversion 20190301
or later, they contain the required versions of the\n\t\t\tcontainer agent and ecs-init
. For more information, see Amazon ECS-optimized Linux AMI in the Amazon Elastic Container Service Developer Guide.
An object representing a constraint on task placement in the task definition. For more\n\t\t\tinformation, see Task placement constraints in the\n\t\t\tAmazon Elastic Container Service Developer Guide.
\nTask placement constraints aren't supported for tasks run on Fargate.
\nAn object representing a constraint on task placement in the task definition. For more\n\t\t\tinformation, see Task placement constraints in the\n\t\t\tAmazon Elastic Container Service Developer Guide.
\n\t\tTask placement constraints aren't supported for tasks run on Fargate.
\n\t\tThe ephemeral storage setting override for the task.
\nThis parameter is only supported for tasks hosted on Fargate that\n\t\t\t\tuse the following platform versions:
\nLinux platform version 1.4.0
or later.
Windows platform version 1.0.0
or later.
The ephemeral storage setting override for the task.
\n\t\tThis parameter is only supported for tasks hosted on Fargate that\n\t\t\t\tuse the following platform versions:
\n\t\t\tLinux platform version 1.4.0
or later.
Windows platform version 1.0.0
or later.
The external ID associated with the task set.
\nIf an CodeDeploy deployment created a task set, the externalId
parameter\n\t\t\tcontains the CodeDeploy deployment ID.
If a task set is created for an external deployment and is associated with a service\n\t\t\tdiscovery registry, the externalId
parameter contains the\n\t\t\t\tECS_TASK_SET_EXTERNAL_ID
Cloud Map attribute.
The external ID associated with the task set.
\n\t\tIf an CodeDeploy deployment created a task set, the externalId
parameter\n\t\t\tcontains the CodeDeploy deployment ID.
If a task set is created for an external deployment and is associated with a service\n\t\t\tdiscovery registry, the externalId
parameter contains the\n\t\t\t\tECS_TASK_SET_EXTERNAL_ID
Cloud Map attribute.
The status of the task set. The following describes each state.
\nThe task set is serving production traffic.
\nThe task set isn't serving production traffic.
\nThe tasks in the task set are being stopped, and their corresponding\n\t\t\t\t\t\ttargets are being deregistered from their target group.
\nThe status of the task set. The following describes each state.
\n\t\tThe task set is serving production traffic.
\n\t\t\t\tThe task set isn't serving production traffic.
\n\t\t\t\tThe tasks in the task set are being stopped, and their corresponding\n\t\t\t\t\t\ttargets are being deregistered from their target group.
\n\t\t\t\tThe operating system that your tasks in the set are running on. A platform family is\n\t\t\tspecified only for tasks that use the Fargate launch type.
\nAll tasks in the set must have the same value.
" + "smithy.api#documentation": "The operating system that your tasks in the set are running on. A platform family is\n\t\t\tspecified only for tasks that use the Fargate launch type.
\n\t\tAll tasks in the set must have the same value.
" } }, "networkConfiguration": { @@ -11395,7 +11349,7 @@ "stabilityStatus": { "target": "com.amazonaws.ecs#StabilityStatus", "traits": { - "smithy.api#documentation": "The stability status. This indicates whether the task set has reached a steady state.\n\t\t\tIf the following conditions are met, the task set are in\n\t\t\tSTEADY_STATE
:
The task runningCount
is equal to the\n\t\t\t\t\t\tcomputedDesiredCount
.
The pendingCount
is 0
.
There are no tasks that are running on container instances in the\n\t\t\t\t\t\tDRAINING
status.
All tasks are reporting a healthy status from the load balancers, service\n\t\t\t\t\tdiscovery, and container health checks.
\nIf any of those conditions aren't met, the stability status returns\n\t\t\t\tSTABILIZING
.
The stability status. This indicates whether the task set has reached a steady state.\n\t\t\tIf the following conditions are met, the task set are in\n\t\t\tSTEADY_STATE
:
The task runningCount
is equal to the\n\t\t\t\t\t\tcomputedDesiredCount
.
The pendingCount
is 0
.
There are no tasks that are running on container instances in the\n\t\t\t\t\t\tDRAINING
status.
All tasks are reporting a healthy status from the load balancers, service\n\t\t\t\t\tdiscovery, and container health checks.
\n\t\t\tIf any of those conditions aren't met, the stability status returns\n\t\t\t\tSTABILIZING
.
The metadata that you apply to the task set to help you categorize and organize them.\n\t\t\tEach tag consists of a key and an optional value. You define both.
\nThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The metadata that you apply to the task set to help you categorize and organize them.\n\t\t\tEach tag consists of a key and an optional value. You define both.
\n\t\tThe following basic restrictions apply to tags:
\nMaximum number of tags per resource - 50
\nFor each resource, each tag key must be unique, and each tag key can have only\n one value.
\nMaximum key length - 128 Unicode characters in UTF-8
\nMaximum value length - 256 Unicode characters in UTF-8
\nIf your tagging schema is used across multiple services and resources,\n remember that other services may have restrictions on allowed characters.\n Generally allowed characters are: letters, numbers, and spaces representable in\n UTF-8, and the following characters: + - = . _ : / @.
\nTag keys and values are case-sensitive.
\nDo not use aws:
, AWS:
, or any upper or lowercase\n combination of such as a prefix for either keys or values as it is reserved for\n Amazon Web Services use. You cannot edit or delete tag keys or values with this prefix. Tags with\n this prefix do not count against your tags per resource limit.
The list of tmpfs volume mount options.
\nValid values: \"defaults\" | \"ro\" | \"rw\" | \"suid\" | \"nosuid\" | \"dev\" | \"nodev\" |\n\t\t\t\t\"exec\" | \"noexec\" | \"sync\" | \"async\" | \"dirsync\" | \"remount\" | \"mand\" | \"nomand\" |\n\t\t\t\t\"atime\" | \"noatime\" | \"diratime\" | \"nodiratime\" | \"bind\" | \"rbind\" | \"unbindable\" |\n\t\t\t\t\"runbindable\" | \"private\" | \"rprivate\" | \"shared\" | \"rshared\" | \"slave\" | \"rslave\" |\n\t\t\t\t\"relatime\" | \"norelatime\" | \"strictatime\" | \"nostrictatime\" | \"mode\" | \"uid\" | \"gid\"\n\t\t\t\t| \"nr_inodes\" | \"nr_blocks\" | \"mpol\"
\n
The list of tmpfs volume mount options.
\n\t\tValid values: \"defaults\" | \"ro\" | \"rw\" | \"suid\" | \"nosuid\" | \"dev\" | \"nodev\" |\n\t\t\t\t\"exec\" | \"noexec\" | \"sync\" | \"async\" | \"dirsync\" | \"remount\" | \"mand\" | \"nomand\" |\n\t\t\t\t\"atime\" | \"noatime\" | \"diratime\" | \"nodiratime\" | \"bind\" | \"rbind\" | \"unbindable\" |\n\t\t\t\t\"runbindable\" | \"private\" | \"rprivate\" | \"shared\" | \"rshared\" | \"slave\" | \"rslave\" |\n\t\t\t\t\"relatime\" | \"norelatime\" | \"strictatime\" | \"nostrictatime\" | \"mode\" | \"uid\" | \"gid\"\n\t\t\t\t| \"nr_inodes\" | \"nr_blocks\" | \"mpol\"
\n
The ulimit
settings to pass to the container.
Amazon ECS tasks hosted on Fargate use the default\n\t\t\t\t\t\t\tresource limit values set by the operating system with the exception of\n\t\t\t\t\t\t\tthe nofile
resource limit parameter which Fargate\n\t\t\t\t\t\t\toverrides. The nofile
resource limit sets a restriction on\n\t\t\t\t\t\t\tthe number of open files that a container can use. The default\n\t\t\t\t\t\t\t\tnofile
soft limit is 1024
and hard limit\n\t\t\t\t\t\t\tis 4096
.
The ulimit
settings to pass to the container.
Amazon ECS tasks hosted on Fargate use the default\n\t\t\t\t\t\t\tresource limit values set by the operating system with the exception of\n\t\t\t\t\t\t\tthe nofile
resource limit parameter which Fargate\n\t\t\t\t\t\t\toverrides. The nofile
resource limit sets a restriction on\n\t\t\t\t\t\t\tthe number of open files that a container can use. The default\n\t\t\t\t\t\t\t\tnofile
soft limit is 1024
and hard limit\n\t\t\t\t\t\t\tis 4096
.
Use this parameter to set a default Service Connect namespace. After you set a default \n\tService Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as\n\tclient services in the namespace. This setting only applies to new services that set the enabled
parameter to\n\ttrue
in the ServiceConnectConfiguration
.\n\tYou can set the namespace of each service individually in the ServiceConnectConfiguration
to override this default\n\tparameter.
Tasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "Use this parameter to set a default Service Connect namespace. After you set a default \n\tService Connect namespace, any new services with Service Connect turned on that are created in the cluster are added as\n\tclient services in the namespace. This setting only applies to new services that set the enabled
parameter to\n\ttrue
in the ServiceConnectConfiguration
.\n\tYou can set the namespace of each service individually in the ServiceConnectConfiguration
to override this default\n\tparameter.
Tasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" } } } @@ -11907,7 +11861,7 @@ "settings": { "target": "com.amazonaws.ecs#ClusterSettings", "traits": { - "smithy.api#documentation": "The setting to use by default for a cluster. This parameter is used to turn on CloudWatch\n\t\t\tContainer Insights for a cluster. If this value is specified, it overrides the\n\t\t\t\tcontainerInsights
value set with PutAccountSetting or\n\t\t\t\tPutAccountSettingDefault.
Currently, if you delete an existing cluster that does not have Container Insights\n\t\t\t\tturned on, and then create a new cluster with the same name with Container Insights\n\t\t\t\ttuned on, Container Insights will not actually be turned on. If you want to preserve\n\t\t\t\tthe same name for your existing cluster and turn on Container Insights, you must\n\t\t\t\twait 7 days before you can re-create it.
\nThe setting to use by default for a cluster. This parameter is used to turn on CloudWatch\n\t\t\tContainer Insights for a cluster. If this value is specified, it overrides the\n\t\t\t\tcontainerInsights
value set with PutAccountSetting or\n\t\t\t\tPutAccountSettingDefault.
Currently, if you delete an existing cluster that does not have Container Insights\n\t\t\t\tturned on, and then create a new cluster with the same name with Container Insights\n\t\t\t\ttuned on, Container Insights will not actually be turned on. If you want to preserve\n\t\t\t\tthe same name for your existing cluster and turn on Container Insights, you must\n\t\t\t\twait 7 days before you can re-create it.
\n\t\tUpdates the Amazon ECS container agent on a specified container instance. Updating the\n\t\t\tAmazon ECS container agent doesn't interrupt running tasks or services on the container\n\t\t\tinstance. The process for updating the agent differs depending on whether your container\n\t\t\tinstance was launched with the Amazon ECS-optimized AMI or another operating system.
\nThe UpdateContainerAgent
API isn't supported for container instances\n\t\t\t\tusing the Amazon ECS-optimized Amazon Linux 2 (arm64) AMI. To update the container agent,\n\t\t\t\tyou can update the ecs-init
package. This updates the agent. For more\n\t\t\t\tinformation, see Updating the\n\t\t\t\t\tAmazon ECS container agent in the Amazon Elastic Container Service Developer Guide.
Agent updates with the UpdateContainerAgent
API operation do not\n\t\t\t\tapply to Windows container instances. We recommend that you launch new container\n\t\t\t\tinstances to update the agent version in your Windows clusters.
The UpdateContainerAgent
API requires an Amazon ECS-optimized AMI or Amazon\n\t\t\tLinux AMI with the ecs-init
service installed and running. For help\n\t\t\tupdating the Amazon ECS container agent on other operating systems, see Manually updating the Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide.
Updates the Amazon ECS container agent on a specified container instance. Updating the\n\t\t\tAmazon ECS container agent doesn't interrupt running tasks or services on the container\n\t\t\tinstance. The process for updating the agent differs depending on whether your container\n\t\t\tinstance was launched with the Amazon ECS-optimized AMI or another operating system.
\n\t\tThe UpdateContainerAgent
API isn't supported for container instances\n\t\t\t\tusing the Amazon ECS-optimized Amazon Linux 2 (arm64) AMI. To update the container agent,\n\t\t\t\tyou can update the ecs-init
package. This updates the agent. For more\n\t\t\t\tinformation, see Updating the\n\t\t\t\t\tAmazon ECS container agent in the Amazon Elastic Container Service Developer Guide.
Agent updates with the UpdateContainerAgent
API operation do not\n\t\t\t\tapply to Windows container instances. We recommend that you launch new container\n\t\t\t\tinstances to update the agent version in your Windows clusters.
The UpdateContainerAgent
API requires an Amazon ECS-optimized AMI or Amazon\n\t\t\tLinux AMI with the ecs-init
service installed and running. For help\n\t\t\tupdating the Amazon ECS container agent on other operating systems, see Manually updating the Amazon ECS container agent in the Amazon Elastic Container Service Developer Guide.
Modifies the status of an Amazon ECS container instance.
\nOnce a container instance has reached an ACTIVE
state, you can change the\n\t\t\tstatus of a container instance to DRAINING
to manually remove an instance\n\t\t\tfrom a cluster, for example to perform system updates, update the Docker daemon, or\n\t\t\tscale down the cluster size.
A container instance can't be changed to DRAINING
until it has\n\t\t\t\treached an ACTIVE
status. If the instance is in any other status, an\n\t\t\t\terror will be received.
When you set a container instance to DRAINING
, Amazon ECS prevents new tasks\n\t\t\tfrom being scheduled for placement on the container instance and replacement service\n\t\t\ttasks are started on other container instances in the cluster if the resources are\n\t\t\tavailable. Service tasks on the container instance that are in the PENDING
\n\t\t\tstate are stopped immediately.
Service tasks on the container instance that are in the RUNNING
state are\n\t\t\tstopped and replaced according to the service's deployment configuration parameters,\n\t\t\t\tminimumHealthyPercent
and maximumPercent
. You can change\n\t\t\tthe deployment configuration of your service using UpdateService.
If minimumHealthyPercent
is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount
temporarily during task replacement. For example,\n\t\t\t\t\t\tdesiredCount
is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. If the\n\t\t\t\t\tminimum is 100%, the service scheduler can't remove existing tasks until the\n\t\t\t\t\treplacement tasks are considered healthy. Tasks for services that do not use a\n\t\t\t\t\tload balancer are considered healthy if they're in the RUNNING
\n\t\t\t\t\tstate. Tasks for services that use a load balancer are considered healthy if\n\t\t\t\t\tthey're in the RUNNING
state and are reported as healthy by the\n\t\t\t\t\tload balancer.
The maximumPercent
parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during task replacement. You can use this to define the\n\t\t\t\t\treplacement batch size. For example, if desiredCount
is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four tasks to be\n\t\t\t\t\tdrained, provided that the cluster resources required to do this are available.\n\t\t\t\t\tIf the maximum is 100%, then replacement tasks can't start until the draining\n\t\t\t\t\ttasks have stopped.
Any PENDING
or RUNNING
tasks that do not belong to a service\n\t\t\taren't affected. You must wait for them to finish or stop them manually.
A container instance has completed draining when it has no more RUNNING
\n\t\t\ttasks. You can verify this using ListTasks.
When a container instance has been drained, you can set a container instance to\n\t\t\t\tACTIVE
status and once it has reached that status the Amazon ECS scheduler\n\t\t\tcan begin scheduling tasks on the instance again.
Modifies the status of an Amazon ECS container instance.
\n\t\tOnce a container instance has reached an ACTIVE
state, you can change the\n\t\t\tstatus of a container instance to DRAINING
to manually remove an instance\n\t\t\tfrom a cluster, for example to perform system updates, update the Docker daemon, or\n\t\t\tscale down the cluster size.
A container instance can't be changed to DRAINING
until it has\n\t\t\t\treached an ACTIVE
status. If the instance is in any other status, an\n\t\t\t\terror will be received.
When you set a container instance to DRAINING
, Amazon ECS prevents new tasks\n\t\t\tfrom being scheduled for placement on the container instance and replacement service\n\t\t\ttasks are started on other container instances in the cluster if the resources are\n\t\t\tavailable. Service tasks on the container instance that are in the PENDING
\n\t\t\tstate are stopped immediately.
Service tasks on the container instance that are in the RUNNING
state are\n\t\t\tstopped and replaced according to the service's deployment configuration parameters,\n\t\t\t\tminimumHealthyPercent
and maximumPercent
. You can change\n\t\t\tthe deployment configuration of your service using UpdateService.
If minimumHealthyPercent
is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount
temporarily during task replacement. For example,\n\t\t\t\t\t\tdesiredCount
is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. If the\n\t\t\t\t\tminimum is 100%, the service scheduler can't remove existing tasks until the\n\t\t\t\t\treplacement tasks are considered healthy. Tasks for services that do not use a\n\t\t\t\t\tload balancer are considered healthy if they're in the RUNNING
\n\t\t\t\t\tstate. Tasks for services that use a load balancer are considered healthy if\n\t\t\t\t\tthey're in the RUNNING
state and are reported as healthy by the\n\t\t\t\t\tload balancer.
The maximumPercent
parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during task replacement. You can use this to define the\n\t\t\t\t\treplacement batch size. For example, if desiredCount
is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four tasks to be\n\t\t\t\t\tdrained, provided that the cluster resources required to do this are available.\n\t\t\t\t\tIf the maximum is 100%, then replacement tasks can't start until the draining\n\t\t\t\t\ttasks have stopped.
Any PENDING
or RUNNING
tasks that do not belong to a service\n\t\t\taren't affected. You must wait for them to finish or stop them manually.
A container instance has completed draining when it has no more RUNNING
\n\t\t\ttasks. You can verify this using ListTasks.
When a container instance has been drained, you can set a container instance to\n\t\t\t\tACTIVE
status and once it has reached that status the Amazon ECS scheduler\n\t\t\tcan begin scheduling tasks on the instance again.
Modifies the parameters of a service.
\nFor services using the rolling update (ECS
) you can update the desired\n\t\t\tcount, deployment configuration, network configuration, load balancers, service\n\t\t\tregistries, enable ECS managed tags option, propagate tags option, task placement\n\t\t\tconstraints and strategies, and task definition. When you update any of these\n\t\t\tparameters, Amazon ECS starts new tasks with the new configuration.
For services using the blue/green (CODE_DEPLOY
) deployment controller,\n\t\t\tonly the desired count, deployment configuration, health check grace period, task\n\t\t\tplacement constraints and strategies, enable ECS managed tags option, and propagate tags\n\t\t\tcan be updated using this API. If the network configuration, platform version, task\n\t\t\tdefinition, or load balancer need to be updated, create a new CodeDeploy deployment. For more\n\t\t\tinformation, see CreateDeployment in the CodeDeploy API Reference.
For services using an external deployment controller, you can update only the desired\n\t\t\tcount, task placement constraints and strategies, health check grace period, enable ECS\n\t\t\tmanaged tags option, and propagate tags option, using this API. If the launch type, load\n\t\t\tbalancer, network configuration, platform version, or task definition need to be\n\t\t\tupdated, create a new task set. For more information, see CreateTaskSet.
\nYou can add to or subtract from the number of instantiations of a task definition in a\n\t\t\tservice by specifying the cluster that the service is running in and a new\n\t\t\t\tdesiredCount
parameter.
If you have updated the Docker image of your application, you can create a new task\n\t\t\tdefinition with that image and deploy it to your service. The service scheduler uses the\n\t\t\tminimum healthy percent and maximum percent parameters (in the service's deployment\n\t\t\tconfiguration) to determine the deployment strategy.
\nIf your updated Docker image uses the same tag as what is in the existing task\n\t\t\t\tdefinition for your service (for example, my_image:latest
), you don't\n\t\t\t\tneed to create a new revision of your task definition. You can update the service\n\t\t\t\tusing the forceNewDeployment
option. The new tasks launched by the\n\t\t\t\tdeployment pull the current image/tag combination from your repository when they\n\t\t\t\tstart.
You can also update the deployment configuration of a service. When a deployment is\n\t\t\ttriggered by updating the task definition of a service, the service scheduler uses the\n\t\t\tdeployment configuration parameters, minimumHealthyPercent
and\n\t\t\t\tmaximumPercent
, to determine the deployment strategy.
If minimumHealthyPercent
is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount
temporarily during a deployment. For example, if\n\t\t\t\t\t\tdesiredCount
is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. Tasks for\n\t\t\t\t\tservices that don't use a load balancer are considered healthy if they're in the\n\t\t\t\t\t\tRUNNING
state. Tasks for services that use a load balancer are\n\t\t\t\t\tconsidered healthy if they're in the RUNNING
state and are reported\n\t\t\t\t\tas healthy by the load balancer.
The maximumPercent
parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during a deployment. You can use it to define the\n\t\t\t\t\tdeployment batch size. For example, if desiredCount
is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four older tasks\n\t\t\t\t\t(provided that the cluster resources required to do this are available).
When UpdateService stops a task during a deployment, the equivalent\n\t\t\tof docker stop
is issued to the containers running in the task. This\n\t\t\tresults in a SIGTERM
and a 30-second timeout. After this,\n\t\t\t\tSIGKILL
is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM
gracefully and exits within 30 seconds from\n\t\t\treceiving it, no SIGKILL
is sent.
When the service scheduler launches new tasks, it determines task placement in your\n\t\t\tcluster with the following logic.
\nDetermine which of the container instances in your cluster can support your\n\t\t\t\t\tservice's task definition. For example, they have the required CPU, memory,\n\t\t\t\t\tports, and container instance attributes.
\nBy default, the service scheduler attempts to balance tasks across\n\t\t\t\t\tAvailability Zones in this manner even though you can choose a different\n\t\t\t\t\tplacement strategy.
\nSort the valid container instances by the fewest number of running\n\t\t\t\t\t\t\ttasks for this service in the same Availability Zone as the instance.\n\t\t\t\t\t\t\tFor example, if zone A has one running service task and zones B and C\n\t\t\t\t\t\t\teach have zero, valid container instances in either zone B or C are\n\t\t\t\t\t\t\tconsidered optimal for placement.
\nPlace the new service task on a valid container instance in an optimal\n\t\t\t\t\t\t\tAvailability Zone (based on the previous steps), favoring container\n\t\t\t\t\t\t\tinstances with the fewest number of running tasks for this\n\t\t\t\t\t\t\tservice.
\nWhen the service scheduler stops running tasks, it attempts to maintain balance across\n\t\t\tthe Availability Zones in your cluster using the following logic:
\nSort the container instances by the largest number of running tasks for this\n\t\t\t\t\tservice in the same Availability Zone as the instance. For example, if zone A\n\t\t\t\t\thas one running service task and zones B and C each have two, container\n\t\t\t\t\tinstances in either zone B or C are considered optimal for termination.
\nStop the task on a container instance in an optimal Availability Zone (based\n\t\t\t\t\ton the previous steps), favoring container instances with the largest number of\n\t\t\t\t\trunning tasks for this service.
\nYou must have a service-linked role when you update any of the following service\n\t\t\t\tproperties. If you specified a custom IAM role when you created the service, Amazon ECS\n\t\t\t\tautomatically replaces the roleARN associated with the service with the ARN of your\n\t\t\t\tservice-linked role. For more information, see Service-linked roles in the Amazon Elastic Container Service Developer Guide.
\n\n loadBalancers,
\n
\n serviceRegistries
\n
Modifies the parameters of a service.
\n\t\tFor services using the rolling update (ECS
) you can update the desired\n\t\t\tcount, deployment configuration, network configuration, load balancers, service\n\t\t\tregistries, enable ECS managed tags option, propagate tags option, task placement\n\t\t\tconstraints and strategies, and task definition. When you update any of these\n\t\t\tparameters, Amazon ECS starts new tasks with the new configuration.
For services using the blue/green (CODE_DEPLOY
) deployment controller,\n\t\t\tonly the desired count, deployment configuration, health check grace period, task\n\t\t\tplacement constraints and strategies, enable ECS managed tags option, and propagate tags\n\t\t\tcan be updated using this API. If the network configuration, platform version, task\n\t\t\tdefinition, or load balancer need to be updated, create a new CodeDeploy deployment. For more\n\t\t\tinformation, see CreateDeployment in the CodeDeploy API Reference.
For services using an external deployment controller, you can update only the desired\n\t\t\tcount, task placement constraints and strategies, health check grace period, enable ECS\n\t\t\tmanaged tags option, and propagate tags option, using this API. If the launch type, load\n\t\t\tbalancer, network configuration, platform version, or task definition need to be\n\t\t\tupdated, create a new task set. For more information, see CreateTaskSet.
\n\t\tYou can add to or subtract from the number of instantiations of a task definition in a\n\t\t\tservice by specifying the cluster that the service is running in and a new\n\t\t\t\tdesiredCount
parameter.
If you have updated the Docker image of your application, you can create a new task\n\t\t\tdefinition with that image and deploy it to your service. The service scheduler uses the\n\t\t\tminimum healthy percent and maximum percent parameters (in the service's deployment\n\t\t\tconfiguration) to determine the deployment strategy.
\n\t\tIf your updated Docker image uses the same tag as what is in the existing task\n\t\t\t\tdefinition for your service (for example, my_image:latest
), you don't\n\t\t\t\tneed to create a new revision of your task definition. You can update the service\n\t\t\t\tusing the forceNewDeployment
option. The new tasks launched by the\n\t\t\t\tdeployment pull the current image/tag combination from your repository when they\n\t\t\t\tstart.
You can also update the deployment configuration of a service. When a deployment is\n\t\t\ttriggered by updating the task definition of a service, the service scheduler uses the\n\t\t\tdeployment configuration parameters, minimumHealthyPercent
and\n\t\t\t\tmaximumPercent
, to determine the deployment strategy.
If minimumHealthyPercent
is below 100%, the scheduler can ignore\n\t\t\t\t\t\tdesiredCount
temporarily during a deployment. For example, if\n\t\t\t\t\t\tdesiredCount
is four tasks, a minimum of 50% allows the\n\t\t\t\t\tscheduler to stop two existing tasks before starting two new tasks. Tasks for\n\t\t\t\t\tservices that don't use a load balancer are considered healthy if they're in the\n\t\t\t\t\t\tRUNNING
state. Tasks for services that use a load balancer are\n\t\t\t\t\tconsidered healthy if they're in the RUNNING
state and are reported\n\t\t\t\t\tas healthy by the load balancer.
The maximumPercent
parameter represents an upper limit on the\n\t\t\t\t\tnumber of running tasks during a deployment. You can use it to define the\n\t\t\t\t\tdeployment batch size. For example, if desiredCount
is four tasks,\n\t\t\t\t\ta maximum of 200% starts four new tasks before stopping the four older tasks\n\t\t\t\t\t(provided that the cluster resources required to do this are available).
When UpdateService stops a task during a deployment, the equivalent\n\t\t\tof docker stop
is issued to the containers running in the task. This\n\t\t\tresults in a SIGTERM
and a 30-second timeout. After this,\n\t\t\t\tSIGKILL
is sent and the containers are forcibly stopped. If the\n\t\t\tcontainer handles the SIGTERM
gracefully and exits within 30 seconds from\n\t\t\treceiving it, no SIGKILL
is sent.
When the service scheduler launches new tasks, it determines task placement in your\n\t\t\tcluster with the following logic.
\n\t\tDetermine which of the container instances in your cluster can support your\n\t\t\t\t\tservice's task definition. For example, they have the required CPU, memory,\n\t\t\t\t\tports, and container instance attributes.
\n\t\t\tBy default, the service scheduler attempts to balance tasks across\n\t\t\t\t\tAvailability Zones in this manner even though you can choose a different\n\t\t\t\t\tplacement strategy.
\n\t\t\t\tSort the valid container instances by the fewest number of running\n\t\t\t\t\t\t\ttasks for this service in the same Availability Zone as the instance.\n\t\t\t\t\t\t\tFor example, if zone A has one running service task and zones B and C\n\t\t\t\t\t\t\teach have zero, valid container instances in either zone B or C are\n\t\t\t\t\t\t\tconsidered optimal for placement.
\n\t\t\t\t\tPlace the new service task on a valid container instance in an optimal\n\t\t\t\t\t\t\tAvailability Zone (based on the previous steps), favoring container\n\t\t\t\t\t\t\tinstances with the fewest number of running tasks for this\n\t\t\t\t\t\t\tservice.
\n\t\t\t\t\tWhen the service scheduler stops running tasks, it attempts to maintain balance across\n\t\t\tthe Availability Zones in your cluster using the following logic:
\n\t\tSort the container instances by the largest number of running tasks for this\n\t\t\t\t\tservice in the same Availability Zone as the instance. For example, if zone A\n\t\t\t\t\thas one running service task and zones B and C each have two, container\n\t\t\t\t\tinstances in either zone B or C are considered optimal for termination.
\n\t\t\tStop the task on a container instance in an optimal Availability Zone (based\n\t\t\t\t\ton the previous steps), favoring container instances with the largest number of\n\t\t\t\t\trunning tasks for this service.
\n\t\t\tYou must have a service-linked role when you update any of the following service\n\t\t\t\tproperties. If you specified a custom IAM role when you created the service, Amazon ECS\n\t\t\t\tautomatically replaces the roleARN associated with the service with the ARN of your\n\t\t\t\tservice-linked role. For more information, see Service-linked roles in the Amazon Elastic Container Service Developer Guide.
\n\t\t\t\n loadBalancers,
\n
\n serviceRegistries
\n
The capacity provider strategy to update the service to use.
\nif the service uses the default capacity provider strategy for the cluster, the\n\t\t\tservice can be updated to use one or more capacity providers as opposed to the default\n\t\t\tcapacity provider strategy. However, when a service is using a capacity provider\n\t\t\tstrategy that's not the default capacity provider strategy, the service can't be updated\n\t\t\tto use the cluster's default capacity provider strategy.
\nA capacity provider strategy consists of one or more capacity providers along with the\n\t\t\t\tbase
and weight
to assign to them. A capacity provider\n\t\t\tmust be associated with the cluster to be used in a capacity provider strategy. The\n\t\t\t\tPutClusterCapacityProviders API is used to associate a capacity\n\t\t\tprovider with a cluster. Only capacity providers with an ACTIVE
or\n\t\t\t\tUPDATING
status can be used.
If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.
\nTo use a Fargate capacity provider, specify either the FARGATE
or\n\t\t\t\tFARGATE_SPOT
capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be\n\t\t\tused.
The PutClusterCapacityProviders API operation is used to update the\n\t\t\tlist of available capacity providers for a cluster after the cluster is created.
\n " + "smithy.api#documentation": "The capacity provider strategy to update the service to use.
\n\t\tif the service uses the default capacity provider strategy for the cluster, the\n\t\t\tservice can be updated to use one or more capacity providers as opposed to the default\n\t\t\tcapacity provider strategy. However, when a service is using a capacity provider\n\t\t\tstrategy that's not the default capacity provider strategy, the service can't be updated\n\t\t\tto use the cluster's default capacity provider strategy.
\n\t\tA capacity provider strategy consists of one or more capacity providers along with the\n\t\t\t\tbase
and weight
to assign to them. A capacity provider\n\t\t\tmust be associated with the cluster to be used in a capacity provider strategy. The\n\t\t\t\tPutClusterCapacityProviders API is used to associate a capacity\n\t\t\tprovider with a cluster. Only capacity providers with an ACTIVE
or\n\t\t\t\tUPDATING
status can be used.
If specifying a capacity provider that uses an Auto Scaling group, the capacity\n\t\t\tprovider must already be created. New capacity providers can be created with the CreateCapacityProvider API operation.
\n\t\tTo use a Fargate capacity provider, specify either the FARGATE
or\n\t\t\t\tFARGATE_SPOT
capacity providers. The Fargate capacity providers are\n\t\t\tavailable to all accounts and only need to be associated with a cluster to be\n\t\t\tused.
The PutClusterCapacityProviders API operation is used to update the\n\t\t\tlist of available capacity providers for a cluster after the cluster is created.
\n\t\t " } }, "deploymentConfiguration": { @@ -12239,13 +12193,13 @@ "placementConstraints": { "target": "com.amazonaws.ecs#PlacementConstraints", "traits": { - "smithy.api#documentation": "An array of task placement constraint objects to update the service to use. If no\n\t\t\tvalue is specified, the existing placement constraints for the service will remain\n\t\t\tunchanged. If this value is specified, it will override any existing placement\n\t\t\tconstraints defined for the service. To remove all existing placement constraints,\n\t\t\tspecify an empty array.
\nYou can specify a maximum of 10 constraints for each task. This limit includes\n\t\t\tconstraints in the task definition and those specified at runtime.
" + "smithy.api#documentation": "An array of task placement constraint objects to update the service to use. If no\n\t\t\tvalue is specified, the existing placement constraints for the service will remain\n\t\t\tunchanged. If this value is specified, it will override any existing placement\n\t\t\tconstraints defined for the service. To remove all existing placement constraints,\n\t\t\tspecify an empty array.
\n\t\tYou can specify a maximum of 10 constraints for each task. This limit includes\n\t\t\tconstraints in the task definition and those specified at runtime.
" } }, "placementStrategy": { "target": "com.amazonaws.ecs#PlacementStrategies", "traits": { - "smithy.api#documentation": "The task placement strategy objects to update the service to use. If no value is\n\t\t\tspecified, the existing placement strategy for the service will remain unchanged. If\n\t\t\tthis value is specified, it will override the existing placement strategy defined for\n\t\t\tthe service. To remove an existing placement strategy, specify an empty object.
\nYou can specify a maximum of five strategy rules for each service.
" + "smithy.api#documentation": "The task placement strategy objects to update the service to use. If no value is\n\t\t\tspecified, the existing placement strategy for the service will remain unchanged. If\n\t\t\tthis value is specified, it will override the existing placement strategy defined for\n\t\t\tthe service. To remove an existing placement strategy, specify an empty object.
\n\t\tYou can specify a maximum of five strategy rules for each service.
" } }, "platformVersion": { @@ -12270,37 +12224,37 @@ "enableExecuteCommand": { "target": "com.amazonaws.ecs#BoxedBoolean", "traits": { - "smithy.api#documentation": "If true
, this enables execute command functionality on all task\n\t\t\tcontainers.
If you do not want to override the value that was set when the service was created,\n\t\t\tyou can set this to null
when performing this action.
If true
, this enables execute command functionality on all task\n\t\t\tcontainers.
If you do not want to override the value that was set when the service was created,\n\t\t\tyou can set this to null
when performing this action.
Determines whether to turn on Amazon ECS managed tags for the tasks in the service. For\n\t\t\tmore information, see Tagging Your Amazon ECS\n\t\t\t\tResources in the Amazon Elastic Container Service Developer Guide.
\nOnly tasks launched after the update will reflect the update. To update the tags on\n\t\t\tall tasks, set forceNewDeployment
to true
, so that Amazon ECS\n\t\t\tstarts new tasks with the updated tags.
Determines whether to turn on Amazon ECS managed tags for the tasks in the service. For\n\t\t\tmore information, see Tagging Your Amazon ECS\n\t\t\t\tResources in the Amazon Elastic Container Service Developer Guide.
\n\t\tOnly tasks launched after the update will reflect the update. To update the tags on\n\t\t\tall tasks, set forceNewDeployment
to true
, so that Amazon ECS\n\t\t\tstarts new tasks with the updated tags.
A list of Elastic Load Balancing load balancer objects. It contains the load balancer name, the\n\t\t\tcontainer name, and the container port to access from the load balancer. The container\n\t\t\tname is as it appears in a container definition.
\nWhen you add, update, or remove a load balancer configuration, Amazon ECS starts new tasks\n\t\t\twith the updated Elastic Load Balancing configuration, and then stops the old tasks when the new tasks\n\t\t\tare running.
\nFor services that use rolling updates, you can add, update, or remove Elastic Load Balancing target\n\t\t\tgroups. You can update from a single target group to multiple target groups and from\n\t\t\tmultiple target groups to a single target group.
\nFor services that use blue/green deployments, you can update Elastic Load Balancing target groups by\n\t\t\tusing \n CreateDeployment\n
through CodeDeploy. Note that multiple target groups\n\t\t\tare not supported for blue/green deployments. For more information see Register\n\t\t\t\tmultiple target groups with a service in the Amazon Elastic Container Service Developer Guide.
For services that use the external deployment controller, you can add, update, or\n\t\t\tremove load balancers by using CreateTaskSet.\n\t\t\tNote that multiple target groups are not supported for external deployments. For more\n\t\t\tinformation see Register\n\t\t\t\tmultiple target groups with a service in the Amazon Elastic Container Service Developer Guide.
\nYou can remove existing loadBalancers
by passing an empty list.
A list of Elastic Load Balancing load balancer objects. It contains the load balancer name, the\n\t\t\tcontainer name, and the container port to access from the load balancer. The container\n\t\t\tname is as it appears in a container definition.
\n\t\tWhen you add, update, or remove a load balancer configuration, Amazon ECS starts new tasks\n\t\t\twith the updated Elastic Load Balancing configuration, and then stops the old tasks when the new tasks\n\t\t\tare running.
\n\t\tFor services that use rolling updates, you can add, update, or remove Elastic Load Balancing target\n\t\t\tgroups. You can update from a single target group to multiple target groups and from\n\t\t\tmultiple target groups to a single target group.
\n\t\tFor services that use blue/green deployments, you can update Elastic Load Balancing target groups by\n\t\t\tusing \n CreateDeployment\n
through CodeDeploy. Note that multiple target groups\n\t\t\tare not supported for blue/green deployments. For more information see Register\n\t\t\t\tmultiple target groups with a service in the Amazon Elastic Container Service Developer Guide.
For services that use the external deployment controller, you can add, update, or\n\t\t\tremove load balancers by using CreateTaskSet.\n\t\t\tNote that multiple target groups are not supported for external deployments. For more\n\t\t\tinformation see Register\n\t\t\t\tmultiple target groups with a service in the Amazon Elastic Container Service Developer Guide.
\n\t\tYou can remove existing loadBalancers
by passing an empty list.
Determines whether to propagate the tags from the task definition or the service to\n\t\t\tthe task. If no value is specified, the tags aren't propagated.
\nOnly tasks launched after the update will reflect the update. To update the tags on\n\t\t\tall tasks, set forceNewDeployment
to true
, so that Amazon ECS\n\t\t\tstarts new tasks with the updated tags.
Determines whether to propagate the tags from the task definition or the service to\n\t\t\tthe task. If no value is specified, the tags aren't propagated.
\n\t\tOnly tasks launched after the update will reflect the update. To update the tags on\n\t\t\tall tasks, set forceNewDeployment
to true
, so that Amazon ECS\n\t\t\tstarts new tasks with the updated tags.
The details for the service discovery registries to assign to this service. For more\n\t\t\tinformation, see Service\n\t\t\t\tDiscovery.
\nWhen you add, update, or remove the service registries configuration, Amazon ECS starts new\n\t\t\ttasks with the updated service registries configuration, and then stops the old tasks\n\t\t\twhen the new tasks are running.
\nYou can remove existing serviceRegistries
by passing an empty\n\t\t\tlist.
The details for the service discovery registries to assign to this service. For more\n\t\t\tinformation, see Service\n\t\t\t\tDiscovery.
\n\t\tWhen you add, update, or remove the service registries configuration, Amazon ECS starts new\n\t\t\ttasks with the updated service registries configuration, and then stops the old tasks\n\t\t\twhen the new tasks are running.
\n\t\tYou can remove existing serviceRegistries
by passing an empty\n\t\t\tlist.
The configuration for this service to discover and connect to\n\tservices, and be discovered by, and connected from, other services within a namespace.
\nTasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" + "smithy.api#documentation": "The configuration for this service to discover and connect to\n\tservices, and be discovered by, and connected from, other services within a namespace.
\nTasks that run in a namespace can use short names to connect\n\tto services in the namespace. Tasks can connect to services across all of the clusters in the namespace.\n\tTasks connect through a managed proxy container\n\tthat collects logs and metrics for increased visibility.\n\tOnly the tasks that Amazon ECS services create are supported with Service Connect.\n\tFor more information, see Service Connect in the Amazon Elastic Container Service Developer Guide.
" } } } @@ -12348,7 +12302,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates the protection status of a task. You can set protectionEnabled
to\n\t\t\t\ttrue
to protect your task from termination during scale-in events from\n\t\t\t\tService\n\t\t\t\tAutoscaling or deployments.
Task-protection, by default, expires after 2 hours at which point Amazon ECS unsets the\n\t\t\t\tprotectionEnabled
property making the task eligible for termination by\n\t\t\ta subsequent scale-in event.
You can specify a custom expiration period for task protection from 1 minute to up to\n\t\t\t2,880 minutes (48 hours). To specify the custom expiration period, set the\n\t\t\t\texpiresInMinutes
property. The expiresInMinutes
property\n\t\t\tis always reset when you invoke this operation for a task that already has\n\t\t\t\tprotectionEnabled
set to true
. You can keep extending the\n\t\t\tprotection expiration period of a task by invoking this operation repeatedly.
To learn more about Amazon ECS task protection, see Task scale-in\n\t\t\t\tprotection in the \n Amazon Elastic Container Service Developer Guide\n .
\nThis operation is only supported for tasks belonging to an Amazon ECS service. Invoking\n\t\t\t\tthis operation for a standalone task will result in an TASK_NOT_VALID
\n\t\t\t\tfailure. For more information, see API failure\n\t\t\t\t\treasons.
If you prefer to set task protection from within the container, we recommend using\n\t\t\t\tthe Task scale-in protection endpoint.
\nUpdates the protection status of a task. You can set protectionEnabled
to\n\t\t\t\ttrue
to protect your task from termination during scale-in events from\n\t\t\t\tService\n\t\t\t\tAutoscaling or deployments.
Task-protection, by default, expires after 2 hours at which point Amazon ECS unsets the\n\t\t\t\tprotectionEnabled
property making the task eligible for termination by\n\t\t\ta subsequent scale-in event.
You can specify a custom expiration period for task protection from 1 minute to up to\n\t\t\t2,880 minutes (48 hours). To specify the custom expiration period, set the\n\t\t\t\texpiresInMinutes
property. The expiresInMinutes
property\n\t\t\tis always reset when you invoke this operation for a task that already has\n\t\t\t\tprotectionEnabled
set to true
. You can keep extending the\n\t\t\tprotection expiration period of a task by invoking this operation repeatedly.
To learn more about Amazon ECS task protection, see Task scale-in\n\t\t\t\tprotection in the \n Amazon Elastic Container Service Developer Guide\n .
\n\t\tThis operation is only supported for tasks belonging to an Amazon ECS service. Invoking\n\t\t\t\tthis operation for a standalone task will result in an TASK_NOT_VALID
\n\t\t\t\tfailure. For more information, see API failure\n\t\t\t\t\treasons.
If you prefer to set task protection from within the container, we recommend using\n\t\t\t\tthe Task scale-in protection endpoint.
\n\t\tIf you set protectionEnabled
to true
, you can specify the\n\t\t\tduration for task protection in minutes. You can specify a value from 1 minute to up to\n\t\t\t2,880 minutes (48 hours). During this time, your task will not be terminated by scale-in\n\t\t\tevents from Service Auto Scaling or deployments. After this time period lapses,\n\t\t\t\tprotectionEnabled
will be reset to false
.
If you don’t specify the time, then the task is automatically protected for 120\n\t\t\tminutes (2 hours).
" + "smithy.api#documentation": "If you set protectionEnabled
to true
, you can specify the\n\t\t\tduration for task protection in minutes. You can specify a value from 1 minute to up to\n\t\t\t2,880 minutes (48 hours). During this time, your task will not be terminated by scale-in\n\t\t\tevents from Service Auto Scaling or deployments. After this time period lapses,\n\t\t\t\tprotectionEnabled
will be reset to false
.
If you don’t specify the time, then the task is automatically protected for 120\n\t\t\tminutes (2 hours).
" } } } @@ -12390,7 +12344,7 @@ "protectedTasks": { "target": "com.amazonaws.ecs#ProtectedTasks", "traits": { - "smithy.api#documentation": "A list of tasks with the following information.
\n\n taskArn
: The task ARN.
\n protectionEnabled
: The protection status of the task. If scale-in\n\t\t\t\t\tprotection is enabled for a task, the value is true
. Otherwise, it\n\t\t\t\t\tis false
.
\n expirationDate
: The epoch time when protection for the task will\n\t\t\t\t\texpire.
A list of tasks with the following information.
\n\t\t\n taskArn
: The task ARN.
\n protectionEnabled
: The protection status of the task. If scale-in\n\t\t\t\t\tprotection is enabled for a task, the value is true
. Otherwise, it\n\t\t\t\t\tis false
.
\n expirationDate
: The epoch time when protection for the task will\n\t\t\t\t\texpire.
This parameter is specified when you use bind mount host volumes. The contents of the\n\t\t\t\thost
parameter determine whether your bind mount host volume persists\n\t\t\ton the host container instance and where it's stored. If the host
parameter\n\t\t\tis empty, then the Docker daemon assigns a host path for your data volume. However, the\n\t\t\tdata isn't guaranteed to persist after the containers that are associated with it stop\n\t\t\trunning.
Windows containers can mount whole directories on the same drive as\n\t\t\t\t$env:ProgramData
. Windows containers can't mount directories on a\n\t\t\tdifferent drive, and mount point can't be across drives. For example, you can mount\n\t\t\t\tC:\\my\\path:C:\\my\\path
and D:\\:D:\\
, but not\n\t\t\t\tD:\\my\\path:C:\\my\\path
or D:\\:C:\\my\\path
.
This parameter is specified when you use bind mount host volumes. The contents of the\n\t\t\t\thost
parameter determine whether your bind mount host volume persists\n\t\t\ton the host container instance and where it's stored. If the host
parameter\n\t\t\tis empty, then the Docker daemon assigns a host path for your data volume. However, the\n\t\t\tdata isn't guaranteed to persist after the containers that are associated with it stop\n\t\t\trunning.
Windows containers can mount whole directories on the same drive as\n\t\t\t\t$env:ProgramData
. Windows containers can't mount directories on a\n\t\t\tdifferent drive, and mount point can't be across drives. For example, you can mount\n\t\t\t\tC:\\my\\path:C:\\my\\path
and D:\\:D:\\
, but not\n\t\t\t\tD:\\my\\path:C:\\my\\path
or D:\\:C:\\my\\path
.
This parameter is specified when you use Docker volumes.
\nWindows containers only support the use of the local
driver. To use bind\n\t\t\tmounts, specify the host
parameter instead.
Docker volumes aren't supported by tasks run on Fargate.
\nThis parameter is specified when you use Docker volumes.
\n\t\tWindows containers only support the use of the local
driver. To use bind\n\t\t\tmounts, specify the host
parameter instead.
Docker volumes aren't supported by tasks run on Fargate.
\n\t\tThe throughput, measured in\n MiB/s,\n that you want to provision for a file system that you're creating. Valid values are\n 1-1024. Required if ThroughputMode
is set to provisioned
. The upper\n limit for throughput is 1024 MiB/s. To increase this limit, contact Amazon Web Services Support. For\n more information, see Amazon EFS quotas that you can increase in the Amazon EFS User Guide.
The throughput, measured in MiB/s, that you want to provision for a file system that\n you're creating. Valid values are 1-1024. Required if ThroughputMode
is set\n to provisioned
. The upper limit for throughput is 1024 MiB/s. To increase this\n limit, contact Amazon Web Services Support. For more information, see Amazon EFS quotas that you can increase\n in the Amazon EFS User Guide.
Use this action to manage EFS lifecycle management and EFS Intelligent-Tiering. A\n LifecycleConfiguration
consists of one or more LifecyclePolicy
\n objects that define the following:
\n EFS Lifecycle management - When Amazon EFS\n automatically transitions files in a file system into the lower-cost EFS Infrequent Access\n (IA) storage class.
\nTo enable EFS Lifecycle management, set the value of TransitionToIA
to one of the available options.
\n EFS Intelligent-Tiering - When Amazon EFS\n automatically transitions files from IA back into the file system's primary storage class\n (EFS Standard or EFS One Zone Standard).
\nTo enable EFS Intelligent-Tiering, set the value of\n TransitionToPrimaryStorageClass
to AFTER_1_ACCESS
.
For more information, see EFS Lifecycle Management.
\nEach Amazon EFS file system supports one lifecycle configuration, which applies to\n all files in the file system. If a LifecycleConfiguration
object already exists\n for the specified file system, a PutLifecycleConfiguration
call modifies the\n existing configuration. A PutLifecycleConfiguration
call with an empty\n LifecyclePolicies
array in the request body deletes any existing\n LifecycleConfiguration
and turns off lifecycle management and EFS\n Intelligent-Tiering for the file system.
In the request, specify the following:
\nThe ID for the file system for which you are enabling, disabling, or modifying\n lifecycle management and EFS Intelligent-Tiering.
\nA LifecyclePolicies
array of LifecyclePolicy
objects that\n define when files are moved into IA storage, and when they are moved back to Standard storage.
Amazon EFS requires that each LifecyclePolicy
\n object have only have a single transition, so the LifecyclePolicies
array needs to be structured with separate \n LifecyclePolicy
objects. See the example requests in the following section for more information.
This operation requires permissions for the elasticfilesystem:PutLifecycleConfiguration
operation.
To apply a LifecycleConfiguration
object to an encrypted file system, you\n need the same Key Management Service permissions as when you created the encrypted file system.
Use this action to manage EFS lifecycle management and intelligent tiering. A \n LifecycleConfiguration
consists of one or more LifecyclePolicy
objects that \n define the following:
\n EFS Lifecycle management - When Amazon EFS \n automatically transitions files in a file system into the lower-cost Infrequent Access (IA) storage class.
\nTo enable EFS Lifecycle management, set the value of TransitionToIA
to one of the available options.
\n EFS Intelligent tiering - When Amazon EFS \n automatically transitions files from IA back into the file system's primary storage class (Standard or One Zone Standard.
\nTo enable EFS Intelligent Tiering, set the value of TransitionToPrimaryStorageClass
to AFTER_1_ACCESS
.
For more information, see EFS Lifecycle Management.
\nEach Amazon EFS file system supports one lifecycle configuration, which applies to all files in the file system. If a\n LifecycleConfiguration
object already exists for the specified file system, a\n PutLifecycleConfiguration
call modifies the existing configuration. A\n PutLifecycleConfiguration
call with an empty LifecyclePolicies
\n array in the request body deletes any existing LifecycleConfiguration
and\n turns off lifecycle management and intelligent tiering for the file system.
In the request, specify the following:
\nThe ID for the file system for which you are enabling, disabling, or modifying lifecycle management \n and intelligent tiering.
\nA LifecyclePolicies
array of LifecyclePolicy
objects that\n define when files are moved into IA storage, and when they are moved back to Standard storage.
Amazon EFS requires that each LifecyclePolicy
\n object have only have a single transition, so the LifecyclePolicies
array needs to be structured with separate \n LifecyclePolicy
objects. See the example requests in the following section for more information.
This operation requires permissions for the elasticfilesystem:PutLifecycleConfiguration
operation.
To apply a LifecycleConfiguration
object to an encrypted file system, you\n need the same Key Management Service permissions as when you created the encrypted file system.
Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy\n for you to run Kubernetes on Amazon Web Services without needing to stand up or maintain\n your own Kubernetes control plane. Kubernetes is an open-source system for automating\n the deployment, scaling, and management of containerized applications.
\nAmazon EKS runs up-to-date versions of the open-source Kubernetes software, so\n you can use all the existing plugins and tooling from the Kubernetes community.\n Applications running on Amazon EKS are fully compatible with applications\n running on any standard Kubernetes environment, whether running in on-premises data\n centers or public clouds. This means that you can easily migrate any standard Kubernetes\n application to Amazon EKS without any code modification required.
", + "smithy.api#documentation": "Amazon Elastic Kubernetes Service (Amazon EKS) is a managed service that makes it easy\n for you to run Kubernetes on Amazon Web Services without needing to stand up or maintain\n your own Kubernetes control plane. Kubernetes is an open-source system for automating\n the deployment, scaling, and management of containerized applications.
\nAmazon EKS runs up-to-date versions of the open-source Kubernetes software, so\n you can use all the existing plugins and tooling from the Kubernetes community.\n Applications running on Amazon EKS are fully compatible with applications\n running on any standard Kubernetes environment, whether running in on-premises data\n centers or public clouds. This means that you can easily migrate any standard Kubernetes\n application to Amazon EKS without any code modification required.
", "smithy.api#title": "Amazon Elastic Kubernetes Service", "smithy.rules#endpointRuleSet": { "version": "1.0", @@ -597,8 +573,8 @@ }, "params": { "UseFIPS": true, - "Region": "ap-south-2", - "UseDualStack": true + "UseDualStack": true, + "Region": "ap-south-2" } }, { @@ -610,8 +586,8 @@ }, "params": { "UseFIPS": true, - "Region": "ap-south-2", - "UseDualStack": false + "UseDualStack": false, + "Region": "ap-south-2" } }, { @@ -623,8 +599,8 @@ }, "params": { "UseFIPS": false, - "Region": "ap-south-2", - "UseDualStack": true + "UseDualStack": true, + "Region": "ap-south-2" } }, { @@ -636,8 +612,8 @@ }, "params": { "UseFIPS": false, - "Region": "ap-south-2", - "UseDualStack": false + "UseDualStack": false, + "Region": "ap-south-2" } }, { @@ -649,8 +625,8 @@ }, "params": { "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "ap-south-1" } }, { @@ -662,8 +638,8 @@ }, "params": { "UseFIPS": true, - "Region": "ap-south-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "ap-south-1" } }, { @@ -675,8 +651,8 @@ }, "params": { "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "ap-south-1" } }, { @@ -688,8 +664,8 @@ }, "params": { "UseFIPS": false, - "Region": "ap-south-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "ap-south-1" } }, { @@ -701,8 +677,8 @@ }, "params": { "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "eu-south-1" } }, { @@ -714,8 +690,8 @@ }, "params": { "UseFIPS": true, - "Region": "eu-south-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "eu-south-1" } }, { @@ -727,8 +703,8 @@ }, "params": { "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "eu-south-1" } }, { @@ -740,8 +716,8 @@ }, "params": { "UseFIPS": false, - "Region": "eu-south-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "eu-south-1" } }, { @@ -753,8 +729,8 @@ }, "params": { "UseFIPS": true, - "Region": "eu-south-2", - "UseDualStack": true + "UseDualStack": true, + "Region": "eu-south-2" } }, { @@ -766,8 +742,8 @@ }, "params": { "UseFIPS": true, - "Region": "eu-south-2", - "UseDualStack": false + "UseDualStack": false, + "Region": "eu-south-2" } }, { @@ -779,8 +755,8 @@ }, "params": { "UseFIPS": false, - "Region": "eu-south-2", - "UseDualStack": true + "UseDualStack": true, + "Region": "eu-south-2" } }, { @@ -792,8 +768,8 @@ }, "params": { "UseFIPS": false, - "Region": "eu-south-2", - "UseDualStack": false + "UseDualStack": false, + "Region": "eu-south-2" } }, { @@ -805,8 +781,8 @@ }, "params": { "UseFIPS": true, - "Region": "us-gov-east-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "us-gov-east-1" } }, { @@ -818,8 +794,8 @@ }, "params": { "UseFIPS": true, - "Region": "us-gov-east-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "us-gov-east-1" } }, { @@ -831,8 +807,8 @@ }, "params": { "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "us-gov-east-1" } }, { @@ -844,8 +820,8 @@ }, "params": { "UseFIPS": false, - "Region": "us-gov-east-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "us-gov-east-1" } }, { @@ -857,8 +833,8 @@ }, "params": { "UseFIPS": true, - "Region": "me-central-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "me-central-1" } }, { @@ -870,8 +846,8 @@ }, "params": { "UseFIPS": true, - "Region": "me-central-1", - 
"UseDualStack": false + "UseDualStack": false, + "Region": "me-central-1" } }, { @@ -883,8 +859,8 @@ }, "params": { "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "me-central-1" } }, { @@ -896,8 +872,8 @@ }, "params": { "UseFIPS": false, - "Region": "me-central-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "me-central-1" } }, { @@ -909,8 +885,8 @@ }, "params": { "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "ca-central-1" } }, { @@ -922,8 +898,8 @@ }, "params": { "UseFIPS": true, - "Region": "ca-central-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "ca-central-1" } }, { @@ -935,8 +911,8 @@ }, "params": { "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "ca-central-1" } }, { @@ -948,8 +924,8 @@ }, "params": { "UseFIPS": false, - "Region": "ca-central-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "ca-central-1" } }, { @@ -961,8 +937,8 @@ }, "params": { "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "eu-central-1" } }, { @@ -974,8 +950,8 @@ }, "params": { "UseFIPS": true, - "Region": "eu-central-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "eu-central-1" } }, { @@ -987,8 +963,8 @@ }, "params": { "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "eu-central-1" } }, { @@ -1000,8 +976,8 @@ }, "params": { "UseFIPS": false, - "Region": "eu-central-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "eu-central-1" } }, { @@ -1013,8 +989,8 @@ }, "params": { "UseFIPS": true, - "Region": "eu-central-2", - "UseDualStack": true + "UseDualStack": true, + "Region": "eu-central-2" } }, { @@ -1026,8 +1002,8 @@ }, "params": { "UseFIPS": true, - "Region": "eu-central-2", - "UseDualStack": false + "UseDualStack": false, + "Region": "eu-central-2" } }, { @@ -1039,8 +1015,8 @@ }, "params": { "UseFIPS": false, - "Region": "eu-central-2", - "UseDualStack": true + "UseDualStack": true, + "Region": "eu-central-2" } }, { @@ -1052,8 +1028,8 @@ }, "params": { "UseFIPS": false, - "Region": "eu-central-2", - "UseDualStack": false + "UseDualStack": false, + "Region": "eu-central-2" } }, { @@ -1065,8 +1041,8 @@ }, "params": { "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "us-west-1" } }, { @@ -1078,8 +1054,8 @@ }, "params": { "UseFIPS": true, - "Region": "us-west-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "us-west-1" } }, { @@ -1091,8 +1067,8 @@ }, "params": { "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "us-west-1" } }, { @@ -1104,8 +1080,8 @@ }, "params": { "UseFIPS": false, - "Region": "us-west-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "us-west-1" } }, { @@ -1117,8 +1093,8 @@ }, "params": { "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": true + "UseDualStack": true, + "Region": "us-west-2" } }, { @@ -1130,8 +1106,8 @@ }, "params": { "UseFIPS": true, - "Region": "us-west-2", - "UseDualStack": false + "UseDualStack": false, + "Region": "us-west-2" } }, { @@ -1143,8 +1119,8 @@ }, "params": { "UseFIPS": false, - "Region": "us-west-2", - "UseDualStack": true + "UseDualStack": true, + "Region": "us-west-2" } }, { @@ -1156,8 +1132,8 @@ }, "params": { "UseFIPS": false, - "Region": "us-west-2", - 
"UseDualStack": false + "UseDualStack": false, + "Region": "us-west-2" } }, { @@ -1169,8 +1145,8 @@ }, "params": { "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "af-south-1" } }, { @@ -1182,8 +1158,8 @@ }, "params": { "UseFIPS": true, - "Region": "af-south-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "af-south-1" } }, { @@ -1195,8 +1171,8 @@ }, "params": { "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "af-south-1" } }, { @@ -1208,8 +1184,8 @@ }, "params": { "UseFIPS": false, - "Region": "af-south-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "af-south-1" } }, { @@ -1221,8 +1197,8 @@ }, "params": { "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "eu-north-1" } }, { @@ -1234,8 +1210,8 @@ }, "params": { "UseFIPS": true, - "Region": "eu-north-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "eu-north-1" } }, { @@ -1247,8 +1223,8 @@ }, "params": { "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "eu-north-1" } }, { @@ -1260,8 +1236,8 @@ }, "params": { "UseFIPS": false, - "Region": "eu-north-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "eu-north-1" } }, { @@ -1273,8 +1249,8 @@ }, "params": { "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": true + "UseDualStack": true, + "Region": "eu-west-3" } }, { @@ -1286,8 +1262,8 @@ }, "params": { "UseFIPS": true, - "Region": "eu-west-3", - "UseDualStack": false + "UseDualStack": false, + "Region": "eu-west-3" } }, { @@ -1299,8 +1275,8 @@ }, "params": { "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": true + "UseDualStack": true, + "Region": "eu-west-3" } }, { @@ -1312,8 +1288,8 @@ }, "params": { "UseFIPS": false, - "Region": "eu-west-3", - "UseDualStack": false + "UseDualStack": false, + "Region": "eu-west-3" } }, { @@ -1325,8 +1301,8 @@ }, "params": { "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": true + "UseDualStack": true, + "Region": "eu-west-2" } }, { @@ -1338,8 +1314,8 @@ }, "params": { "UseFIPS": true, - "Region": "eu-west-2", - "UseDualStack": false + "UseDualStack": false, + "Region": "eu-west-2" } }, { @@ -1351,8 +1327,8 @@ }, "params": { "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": true + "UseDualStack": true, + "Region": "eu-west-2" } }, { @@ -1364,8 +1340,8 @@ }, "params": { "UseFIPS": false, - "Region": "eu-west-2", - "UseDualStack": false + "UseDualStack": false, + "Region": "eu-west-2" } }, { @@ -1377,8 +1353,8 @@ }, "params": { "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "eu-west-1" } }, { @@ -1390,8 +1366,8 @@ }, "params": { "UseFIPS": true, - "Region": "eu-west-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "eu-west-1" } }, { @@ -1403,8 +1379,8 @@ }, "params": { "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "eu-west-1" } }, { @@ -1416,8 +1392,8 @@ }, "params": { "UseFIPS": false, - "Region": "eu-west-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "eu-west-1" } }, { @@ -1429,8 +1405,8 @@ }, "params": { "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": true + "UseDualStack": true, + "Region": "ap-northeast-3" } }, { @@ -1442,8 +1418,8 @@ }, "params": { "UseFIPS": true, - "Region": "ap-northeast-3", - "UseDualStack": false + 
"UseDualStack": false, + "Region": "ap-northeast-3" } }, { @@ -1455,8 +1431,8 @@ }, "params": { "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": true + "UseDualStack": true, + "Region": "ap-northeast-3" } }, { @@ -1468,8 +1444,8 @@ }, "params": { "UseFIPS": false, - "Region": "ap-northeast-3", - "UseDualStack": false + "UseDualStack": false, + "Region": "ap-northeast-3" } }, { @@ -1481,8 +1457,8 @@ }, "params": { "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": true + "UseDualStack": true, + "Region": "ap-northeast-2" } }, { @@ -1494,8 +1470,8 @@ }, "params": { "UseFIPS": true, - "Region": "ap-northeast-2", - "UseDualStack": false + "UseDualStack": false, + "Region": "ap-northeast-2" } }, { @@ -1507,8 +1483,8 @@ }, "params": { "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": true + "UseDualStack": true, + "Region": "ap-northeast-2" } }, { @@ -1520,8 +1496,8 @@ }, "params": { "UseFIPS": false, - "Region": "ap-northeast-2", - "UseDualStack": false + "UseDualStack": false, + "Region": "ap-northeast-2" } }, { @@ -1533,8 +1509,8 @@ }, "params": { "UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "ap-northeast-1" } }, { @@ -1546,8 +1522,8 @@ }, "params": { "UseFIPS": true, - "Region": "ap-northeast-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "ap-northeast-1" } }, { @@ -1559,8 +1535,8 @@ }, "params": { "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "ap-northeast-1" } }, { @@ -1572,8 +1548,8 @@ }, "params": { "UseFIPS": false, - "Region": "ap-northeast-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "ap-northeast-1" } }, { @@ -1585,8 +1561,8 @@ }, "params": { "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "me-south-1" } }, { @@ -1598,8 +1574,8 @@ }, "params": { "UseFIPS": true, - "Region": "me-south-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "me-south-1" } }, { @@ -1611,8 +1587,8 @@ }, "params": { "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "me-south-1" } }, { @@ -1624,8 +1600,8 @@ }, "params": { "UseFIPS": false, - "Region": "me-south-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "me-south-1" } }, { @@ -1637,8 +1613,8 @@ }, "params": { "UseFIPS": true, - "Region": "sa-east-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "sa-east-1" } }, { @@ -1650,8 +1626,8 @@ }, "params": { "UseFIPS": true, - "Region": "sa-east-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "sa-east-1" } }, { @@ -1663,8 +1639,8 @@ }, "params": { "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "sa-east-1" } }, { @@ -1676,8 +1652,8 @@ }, "params": { "UseFIPS": false, - "Region": "sa-east-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "sa-east-1" } }, { @@ -1689,8 +1665,8 @@ }, "params": { "UseFIPS": true, - "Region": "ap-east-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "ap-east-1" } }, { @@ -1702,8 +1678,8 @@ }, "params": { "UseFIPS": true, - "Region": "ap-east-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "ap-east-1" } }, { @@ -1715,8 +1691,8 @@ }, "params": { "UseFIPS": false, - "Region": "ap-east-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "ap-east-1" } }, { @@ -1728,8 +1704,8 @@ }, "params": { "UseFIPS": false, - 
"Region": "ap-east-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "ap-east-1" } }, { @@ -1741,8 +1717,8 @@ }, "params": { "UseFIPS": true, - "Region": "cn-north-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "cn-north-1" } }, { @@ -1754,8 +1730,8 @@ }, "params": { "UseFIPS": true, - "Region": "cn-north-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "cn-north-1" } }, { @@ -1767,8 +1743,8 @@ }, "params": { "UseFIPS": false, - "Region": "cn-north-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "cn-north-1" } }, { @@ -1780,8 +1756,8 @@ }, "params": { "UseFIPS": false, - "Region": "cn-north-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "cn-north-1" } }, { @@ -1793,8 +1769,8 @@ }, "params": { "UseFIPS": true, - "Region": "us-gov-west-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "us-gov-west-1" } }, { @@ -1806,8 +1782,8 @@ }, "params": { "UseFIPS": true, - "Region": "us-gov-west-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "us-gov-west-1" } }, { @@ -1819,8 +1795,8 @@ }, "params": { "UseFIPS": false, - "Region": "us-gov-west-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "us-gov-west-1" } }, { @@ -1832,8 +1808,8 @@ }, "params": { "UseFIPS": false, - "Region": "us-gov-west-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "us-gov-west-1" } }, { @@ -1845,8 +1821,8 @@ }, "params": { "UseFIPS": true, - "Region": "ap-southeast-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "ap-southeast-1" } }, { @@ -1858,8 +1834,8 @@ }, "params": { "UseFIPS": true, - "Region": "ap-southeast-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "ap-southeast-1" } }, { @@ -1871,8 +1847,8 @@ }, "params": { "UseFIPS": false, - "Region": "ap-southeast-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "ap-southeast-1" } }, { @@ -1884,8 +1860,8 @@ }, "params": { "UseFIPS": false, - "Region": "ap-southeast-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "ap-southeast-1" } }, { @@ -1897,8 +1873,8 @@ }, "params": { "UseFIPS": true, - "Region": "ap-southeast-2", - "UseDualStack": true + "UseDualStack": true, + "Region": "ap-southeast-2" } }, { @@ -1910,8 +1886,8 @@ }, "params": { "UseFIPS": true, - "Region": "ap-southeast-2", - "UseDualStack": false + "UseDualStack": false, + "Region": "ap-southeast-2" } }, { @@ -1923,8 +1899,8 @@ }, "params": { "UseFIPS": false, - "Region": "ap-southeast-2", - "UseDualStack": true + "UseDualStack": true, + "Region": "ap-southeast-2" } }, { @@ -1936,8 +1912,8 @@ }, "params": { "UseFIPS": false, - "Region": "ap-southeast-2", - "UseDualStack": false + "UseDualStack": false, + "Region": "ap-southeast-2" } }, { @@ -1947,8 +1923,8 @@ }, "params": { "UseFIPS": true, - "Region": "us-iso-east-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "us-iso-east-1" } }, { @@ -1960,8 +1936,8 @@ }, "params": { "UseFIPS": true, - "Region": "us-iso-east-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "us-iso-east-1" } }, { @@ -1971,8 +1947,8 @@ }, "params": { "UseFIPS": false, - "Region": "us-iso-east-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "us-iso-east-1" } }, { @@ -1984,8 +1960,8 @@ }, "params": { "UseFIPS": false, - "Region": "us-iso-east-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "us-iso-east-1" } }, { @@ -1997,8 +1973,8 @@ }, "params": { "UseFIPS": true, - "Region": "ap-southeast-3", - "UseDualStack": true + "UseDualStack": 
true, + "Region": "ap-southeast-3" } }, { @@ -2010,8 +1986,8 @@ }, "params": { "UseFIPS": true, - "Region": "ap-southeast-3", - "UseDualStack": false + "UseDualStack": false, + "Region": "ap-southeast-3" } }, { @@ -2023,8 +1999,8 @@ }, "params": { "UseFIPS": false, - "Region": "ap-southeast-3", - "UseDualStack": true + "UseDualStack": true, + "Region": "ap-southeast-3" } }, { @@ -2036,8 +2012,8 @@ }, "params": { "UseFIPS": false, - "Region": "ap-southeast-3", - "UseDualStack": false + "UseDualStack": false, + "Region": "ap-southeast-3" } }, { @@ -2049,8 +2025,8 @@ }, "params": { "UseFIPS": true, - "Region": "ap-southeast-4", - "UseDualStack": true + "UseDualStack": true, + "Region": "ap-southeast-4" } }, { @@ -2062,8 +2038,8 @@ }, "params": { "UseFIPS": true, - "Region": "ap-southeast-4", - "UseDualStack": false + "UseDualStack": false, + "Region": "ap-southeast-4" } }, { @@ -2075,8 +2051,8 @@ }, "params": { "UseFIPS": false, - "Region": "ap-southeast-4", - "UseDualStack": true + "UseDualStack": true, + "Region": "ap-southeast-4" } }, { @@ -2088,8 +2064,8 @@ }, "params": { "UseFIPS": false, - "Region": "ap-southeast-4", - "UseDualStack": false + "UseDualStack": false, + "Region": "ap-southeast-4" } }, { @@ -2101,8 +2077,8 @@ }, "params": { "UseFIPS": true, - "Region": "us-east-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "us-east-1" } }, { @@ -2114,8 +2090,8 @@ }, "params": { "UseFIPS": true, - "Region": "us-east-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "us-east-1" } }, { @@ -2127,8 +2103,8 @@ }, "params": { "UseFIPS": false, - "Region": "us-east-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "us-east-1" } }, { @@ -2140,8 +2116,8 @@ }, "params": { "UseFIPS": false, - "Region": "us-east-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "us-east-1" } }, { @@ -2153,8 +2129,8 @@ }, "params": { "UseFIPS": true, - "Region": "us-east-2", - "UseDualStack": true + "UseDualStack": true, + "Region": "us-east-2" } }, { @@ -2166,8 +2142,8 @@ }, "params": { "UseFIPS": true, - "Region": "us-east-2", - "UseDualStack": false + "UseDualStack": false, + "Region": "us-east-2" } }, { @@ -2179,8 +2155,8 @@ }, "params": { "UseFIPS": false, - "Region": "us-east-2", - "UseDualStack": true + "UseDualStack": true, + "Region": "us-east-2" } }, { @@ -2192,8 +2168,8 @@ }, "params": { "UseFIPS": false, - "Region": "us-east-2", - "UseDualStack": false + "UseDualStack": false, + "Region": "us-east-2" } }, { @@ -2205,8 +2181,8 @@ }, "params": { "UseFIPS": true, - "Region": "cn-northwest-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "cn-northwest-1" } }, { @@ -2218,8 +2194,8 @@ }, "params": { "UseFIPS": true, - "Region": "cn-northwest-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "cn-northwest-1" } }, { @@ -2231,8 +2207,8 @@ }, "params": { "UseFIPS": false, - "Region": "cn-northwest-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "cn-northwest-1" } }, { @@ -2244,8 +2220,8 @@ }, "params": { "UseFIPS": false, - "Region": "cn-northwest-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "cn-northwest-1" } }, { @@ -2255,8 +2231,8 @@ }, "params": { "UseFIPS": true, - "Region": "us-isob-east-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "us-isob-east-1" } }, { @@ -2268,8 +2244,8 @@ }, "params": { "UseFIPS": true, - "Region": "us-isob-east-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "us-isob-east-1" } }, { @@ -2279,8 +2255,8 @@ }, "params": { "UseFIPS": 
false, - "Region": "us-isob-east-1", - "UseDualStack": true + "UseDualStack": true, + "Region": "us-isob-east-1" } }, { @@ -2292,8 +2268,8 @@ }, "params": { "UseFIPS": false, - "Region": "us-isob-east-1", - "UseDualStack": false + "UseDualStack": false, + "Region": "us-isob-east-1" } }, { @@ -2305,8 +2281,8 @@ }, "params": { "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": false, + "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -2317,8 +2293,8 @@ }, "params": { "UseFIPS": true, - "Region": "us-east-1", "UseDualStack": false, + "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -2329,8 +2305,8 @@ }, "params": { "UseFIPS": false, - "Region": "us-east-1", "UseDualStack": true, + "Region": "us-east-1", "Endpoint": "https://example.com" } } @@ -2382,7 +2358,7 @@ "health": { "target": "com.amazonaws.eks#AddonHealth", "traits": { - "smithy.api#documentation": "An object that represents the health of the add-on.
" + "smithy.api#documentation": "An object representing the health of the add-on.
" } }, "addonArn": { @@ -2406,7 +2382,7 @@ "serviceAccountRoleArn": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role that's bound to the Kubernetes service account\n that the add-on uses.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role that is bound to the Kubernetes service\n account used by the add-on.
" } }, "tags": { @@ -2436,7 +2412,7 @@ "configurationValues": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "The configuration values that you provided.
" + "smithy.api#documentation": "The provided configuration values.
" } } }, @@ -2714,7 +2690,7 @@ } ], "traits": { - "smithy.api#documentation": "Associate encryption configuration to an existing cluster.
\nYou can use this API to enable encryption on existing clusters which do not have\n encryption already enabled. This allows you to implement a defense-in-depth security\n strategy without migrating applications to new Amazon EKS clusters.
", + "smithy.api#documentation": "Associate encryption configuration to an existing cluster.
\nYou can use this API to enable encryption on existing clusters which do not have\n encryption already enabled. This allows you to implement a defense-in-depth security\n strategy without migrating applications to new Amazon EKS clusters.
", "smithy.api#http": { "method": "POST", "uri": "/clusters/{clusterName}/encryption-config/associate", @@ -2786,7 +2762,7 @@ } ], "traits": { - "smithy.api#documentation": "Associate an identity provider configuration to a cluster.
\nIf you want to authenticate identities using an identity provider, you can create an\n identity provider configuration and associate it to your cluster. After configuring\n authentication to your cluster you can create Kubernetes roles
and\n clusterroles
to assign permissions to the roles, and then bind the\n roles to the identities using Kubernetes rolebindings
and\n clusterrolebindings
. For more information see Using RBAC\n Authorization in the Kubernetes documentation.
Associate an identity provider configuration to a cluster.
\nIf you want to authenticate identities using an identity provider, you can create an\n identity provider configuration and associate it to your cluster. After configuring\n authentication to your cluster you can create Kubernetes roles
and\n clusterroles
to assign permissions to the roles, and then bind the\n roles to the identities using Kubernetes rolebindings
and\n clusterrolebindings
. For more information see Using RBAC\n Authorization in the Kubernetes documentation.
Creates an Amazon EKS add-on.
\nAmazon EKS add-ons help to automate the provisioning and lifecycle management\n of common operational software for Amazon EKS clusters. For more information,\n see Amazon EKS add-ons in the Amazon EKS User Guide.
", + "smithy.api#documentation": "Creates an Amazon EKS add-on.
\nAmazon EKS add-ons help to automate the provisioning and lifecycle management\n of common operational software for Amazon EKS clusters. For more information,\n see Amazon EKS add-ons in the Amazon EKS User Guide.
", "smithy.api#http": { "method": "POST", "uri": "/clusters/{clusterName}/addons", @@ -3450,7 +3426,7 @@ "addonName": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "The name of the add-on. The name must match one of the names that \n DescribeAddonVersions
\n returns.
The name of the add-on. The name must match one of the names returned by \n DescribeAddonVersions
\n .
The Amazon Resource Name (ARN) of an existing IAM role to bind to the add-on's service account. The role must be assigned the IAM permissions required by the add-on. If you don't specify an existing IAM role, then the add-on uses the\n permissions assigned to the node IAM role. For more information, see Amazon EKS node IAM role in the Amazon EKS User Guide.
\nTo specify an existing IAM role, you must have an IAM OpenID Connect (OIDC) provider created for\n your cluster. For more information, see Enabling\n IAM roles for service accounts on your cluster in the\n Amazon EKS User Guide.
\nThe Amazon Resource Name (ARN) of an existing IAM role to bind to the add-on's service account. The role must be assigned the IAM permissions required by the add-on. If you don't specify an existing IAM role, then the add-on uses the\n permissions assigned to the node IAM role. For more information, see Amazon EKS node IAM role in the Amazon EKS User Guide.
\nTo specify an existing IAM role, you must have an IAM OpenID Connect (OIDC) provider created for\n your cluster. For more information, see Enabling\n IAM roles for service accounts on your cluster in the\n Amazon EKS User Guide.
\nHow to resolve field value conflicts for an Amazon EKS add-on. Conflicts are\n handled based on the value you choose:
\n\n None – If the self-managed version of\n the add-on is installed on your cluster, Amazon EKS doesn't change the\n value. Creation of the add-on might fail.
\n\n Overwrite – If the self-managed\n version of the add-on is installed on your cluster and the Amazon EKS\n default value is different than the existing value, Amazon EKS changes\n the value to the Amazon EKS default value.
\n\n Preserve – Not supported. You can set\n this value when updating an add-on though. For more information, see UpdateAddon.
\nIf you don't currently have the self-managed version of the add-on installed on your\n cluster, the Amazon EKS add-on is installed. Amazon EKS sets all values\n to default values, regardless of the option that you specify.
" + "smithy.api#documentation": "How to resolve field value conflicts for an Amazon EKS add-on. Conflicts are\n handled based on the value you choose:
\n\n None – If the self-managed version of\n the add-on is installed on your cluster, Amazon EKS doesn't change the\n value. Creation of the add-on might fail.
\n\n Overwrite – If the self-managed\n version of the add-on is installed on your cluster and the Amazon EKS\n default value is different than the existing value, Amazon EKS changes\n the value to the Amazon EKS default value.
\n\n Preserve – Not supported. You can set\n this value when updating an add-on though. For more information, see UpdateAddon.
\nIf you don't currently have the self-managed version of the add-on installed on your\n cluster, the Amazon EKS add-on is installed. Amazon EKS sets all values\n to default values, regardless of the option that you specify.
" } }, "clientRequestToken": { @@ -3488,7 +3464,7 @@ "configurationValues": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "The set of configuration values for the add-on that's created. The values that you\n provide are validated against the schema in \n DescribeAddonConfiguration
\n .
The set of configuration values for the add-on being created. Whatever values\n provided here are validated against the schema from \n DescribeAddonConfiguration
\n .
Creates an Amazon EKS control plane.
\nThe Amazon EKS control plane consists of control plane instances that run the\n Kubernetes software, such as etcd
and the API server. The control plane\n runs in an account managed by Amazon Web Services, and the Kubernetes API is exposed by\n the Amazon EKS API server endpoint. Each Amazon EKS cluster control\n plane is single tenant and unique. It runs on its own set of Amazon EC2\n instances.
The cluster control plane is provisioned across multiple Availability Zones and\n fronted by an Elastic Load Balancing\n Network Load Balancer. Amazon EKS also provisions elastic network interfaces in\n your VPC subnets to provide connectivity from the control plane instances to the nodes\n (for example, to support kubectl exec
, logs
, and\n proxy
data flows).
Amazon EKS nodes run in your Amazon Web Services account and connect to your\n cluster's control plane over the Kubernetes API server endpoint and a certificate file\n that is created for your cluster.
\nIn most cases, it takes several minutes to create a cluster. After you create an\n Amazon EKS cluster, you must configure your Kubernetes tooling to\n communicate with the API server and launch nodes into your cluster. For more\n information, see Managing Cluster Authentication and Launching\n Amazon EKS nodes in the Amazon EKS User Guide.
", + "smithy.api#documentation": "Creates an Amazon EKS control plane.
\nThe Amazon EKS control plane consists of control plane instances that run the\n Kubernetes software, such as etcd
and the API server. The control plane\n runs in an account managed by Amazon Web Services, and the Kubernetes API is exposed by\n the Amazon EKS API server endpoint. Each Amazon EKS cluster control\n plane is single tenant and unique. It runs on its own set of Amazon EC2\n instances.
The cluster control plane is provisioned across multiple Availability Zones and\n fronted by an Elastic Load Balancing\n Network Load Balancer. Amazon EKS also provisions elastic network interfaces in\n your VPC subnets to provide connectivity from the control plane instances to the nodes\n (for example, to support kubectl exec
, logs
, and\n proxy
data flows).
Amazon EKS nodes run in your Amazon Web Services account and connect to your\n cluster's control plane over the Kubernetes API server endpoint and a certificate file\n that is created for your cluster.
\n \nIn most cases, it takes several minutes to create a cluster. After you create an\n Amazon EKS cluster, you must configure your Kubernetes tooling to\n communicate with the API server and launch nodes into your cluster. For more\n information, see Managing Cluster Authentication and Launching\n Amazon EKS nodes in the Amazon EKS User Guide.
", "smithy.api#http": { "method": "POST", "uri": "/clusters", @@ -3554,7 +3530,7 @@ "version": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "The desired Kubernetes version for your cluster. If you don't specify a value here,\n the default version available in Amazon EKS is used.
\nThe default version might not be the latest version available.
\nThe desired Kubernetes version for your cluster. If you don't specify a value here,\n the default version available in Amazon EKS is used.
\nThe default version might not be the latest version available.
\nEnable or disable exporting the Kubernetes control plane logs for your cluster to\n CloudWatch Logs. By default, cluster control plane logs aren't exported to\n CloudWatch Logs. For more information, see Amazon EKS Cluster control plane logs in the\n \n Amazon EKS User Guide\n .
\nCloudWatch Logs ingestion, archive storage, and data scanning rates apply to\n exported control plane logs. For more information, see CloudWatch\n Pricing.
\nEnable or disable exporting the Kubernetes control plane logs for your cluster to\n CloudWatch Logs. By default, cluster control plane logs aren't exported to\n CloudWatch Logs. For more information, see Amazon EKS Cluster control plane logs in the\n \n Amazon EKS User Guide\n .
\nCloudWatch Logs ingestion, archive storage, and data scanning rates apply to\n exported control plane logs. For more information, see CloudWatch\n Pricing.
\nCreates an Fargate profile for your Amazon EKS cluster. You\n must have at least one Fargate profile in a cluster to be able to run\n pods on Fargate.
\nThe Fargate profile allows an administrator to declare which pods run\n on Fargate and specify which pods run on which Fargate\n profile. This declaration is done through the profile’s selectors. Each profile can have\n up to five selectors that contain a namespace and labels. A namespace is required for\n every selector. The label field consists of multiple optional key-value pairs. Pods that\n match the selectors are scheduled on Fargate. If a to-be-scheduled pod\n matches any of the selectors in the Fargate profile, then that pod is run\n on Fargate.
\nWhen you create a Fargate profile, you must specify a pod execution\n role to use with the pods that are scheduled with the profile. This role is added to the\n cluster's Kubernetes Role Based Access Control (RBAC) for authorization so that the\n kubelet
that is running on the Fargate infrastructure\n can register with your Amazon EKS cluster so that it can appear in your cluster\n as a node. The pod execution role also provides IAM permissions to the\n Fargate infrastructure to allow read access to Amazon ECR\n image repositories. For more information, see Pod\n Execution Role in the Amazon EKS User Guide.
Fargate profiles are immutable. However, you can create a new updated\n profile to replace an existing profile and then delete the original after the updated\n profile has finished creating.
\nIf any Fargate profiles in a cluster are in the DELETING
\n status, you must wait for that Fargate profile to finish deleting before\n you can create any other profiles in that cluster.
For more information, see Fargate Profile in the\n Amazon EKS User Guide.
", + "smithy.api#documentation": "Creates an Fargate profile for your Amazon EKS cluster. You\n must have at least one Fargate profile in a cluster to be able to run\n pods on Fargate.
\nThe Fargate profile allows an administrator to declare which pods run\n on Fargate and specify which pods run on which Fargate\n profile. This declaration is done through the profile’s selectors. Each profile can have\n up to five selectors that contain a namespace and labels. A namespace is required for\n every selector. The label field consists of multiple optional key-value pairs. Pods that\n match the selectors are scheduled on Fargate. If a to-be-scheduled pod\n matches any of the selectors in the Fargate profile, then that pod is run\n on Fargate.
\nWhen you create a Fargate profile, you must specify a pod execution\n role to use with the pods that are scheduled with the profile. This role is added to the\n cluster's Kubernetes Role Based Access Control (RBAC) for authorization so that the\n kubelet
that is running on the Fargate infrastructure\n can register with your Amazon EKS cluster so that it can appear in your cluster\n as a node. The pod execution role also provides IAM permissions to the\n Fargate infrastructure to allow read access to Amazon ECR\n image repositories. For more information, see Pod\n Execution Role in the Amazon EKS User Guide.
Fargate profiles are immutable. However, you can create a new updated\n profile to replace an existing profile and then delete the original after the updated\n profile has finished creating.
\nIf any Fargate profiles in a cluster are in the DELETING
\n status, you must wait for that Fargate profile to finish deleting before\n you can create any other profiles in that cluster.
For more information, see Fargate Profile in the\n Amazon EKS User Guide.
", "smithy.api#http": { "method": "POST", "uri": "/clusters/{clusterName}/fargate-profiles", @@ -3753,7 +3729,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a managed node group for an Amazon EKS cluster. You can only create a\n node group for your cluster that is equal to the current Kubernetes version for the\n cluster. All node groups are created with the latest AMI release version for the\n respective minor Kubernetes version of the cluster, unless you deploy a custom AMI using\n a launch template. For more information about using launch templates, see Launch\n template support.
\nAn Amazon EKS managed node group is an Amazon EC2\n Auto Scaling group and associated Amazon EC2 instances that are managed by\n Amazon Web Services for an Amazon EKS cluster. For more information, see\n Managed node groups in the Amazon EKS User Guide.
\nWindows AMI types are only supported for commercial Regions that support Windows\n Amazon EKS.
\nCreates a managed node group for an Amazon EKS cluster. You can only create a\n node group for your cluster that is equal to the current Kubernetes version for the\n cluster. All node groups are created with the latest AMI release version for the\n respective minor Kubernetes version of the cluster, unless you deploy a custom AMI using\n a launch template. For more information about using launch templates, see Launch\n template support.
\nAn Amazon EKS managed node group is an Amazon EC2\n Auto Scaling group and associated Amazon EC2 instances that are managed by\n Amazon Web Services for an Amazon EKS cluster. Each node group uses a version\n of the Amazon EKS optimized Amazon Linux 2 AMI. For more information, see Managed\n Node Groups in the Amazon EKS User Guide.
", "smithy.api#http": { "method": "POST", "uri": "/clusters/{clusterName}/node-groups", @@ -3788,7 +3764,7 @@ "diskSize": { "target": "com.amazonaws.eks#BoxedInteger", "traits": { - "smithy.api#documentation": "The root device disk size (in GiB) for your node group instances. The default disk\n size is 20 GiB for Linux and Bottlerocket. The default disk size is 50 GiB for Windows.\n If you specify launchTemplate
, then don't specify diskSize
, or the node group \n deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.
The root device disk size (in GiB) for your node group instances. The default disk\n size is 20 GiB. If you specify launchTemplate
, then don't specify diskSize
,\n or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.
Specify the instance types for a node group. If you specify a GPU instance type, make\n sure to also specify an applicable GPU AMI type with the amiType
parameter.\n If you specify launchTemplate
, then you can specify zero or one instance\n type in your launch template or you can specify 0-20 instance types\n for instanceTypes
. If however, you specify an instance type in your launch\n template and specify any instanceTypes
, the node group\n deployment will fail. If you don't specify an instance type in a launch template or for\n instanceTypes
, then t3.medium
is used, by default. If you\n specify Spot
for capacityType
, then we recommend specifying\n multiple values for instanceTypes
. For more information, see Managed node group capacity types and Launch template support in\n the Amazon EKS User Guide.
Specify the instance types for a node group. If you specify a GPU instance type, be\n sure to specify AL2_x86_64_GPU
with the amiType
parameter. If\n you specify launchTemplate
, then you can specify zero or one instance type\n in your launch template or you can specify 0-20 instance types for\n instanceTypes
. If however, you specify an instance type in your launch\n template and specify any instanceTypes
, the node group\n deployment will fail. If you don't specify an instance type in a launch template or for\n instanceTypes
, then t3.medium
is used, by default. If you\n specify Spot
for capacityType
, then we recommend specifying\n multiple values for instanceTypes
. For more information, see Managed node group capacity types and Launch template support in\n the Amazon EKS User Guide.
The AMI type for your node group. If you specify launchTemplate
, and your launch template uses a custom AMI,\n then don't specify amiType
, or the node group deployment\n will fail. If your launch template uses a Windows custom AMI, then add\n eks:kube-proxy-windows
to your Windows nodes rolearn
in\n the aws-auth
\n ConfigMap
. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.
The AMI type for your node group. GPU instance types should use the\n AL2_x86_64_GPU
AMI type. Non-GPU instances should use the\n AL2_x86_64
AMI type. Arm instances should use the\n AL2_ARM_64
AMI type. All types use the Amazon EKS optimized\n Amazon Linux 2 AMI. If you specify launchTemplate
, and your launch template uses a custom AMI,\n then don't specify amiType
, or the node group deployment\n will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.
The remote access configuration to use with your node group.\n For Linux, the protocol is SSH. For Windows, the protocol is RDP.\n If you specify launchTemplate
, then don't specify \n remoteAccess
, or the node group deployment will fail.\n For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.
The remote access (SSH) configuration to use with your node group. If you specify launchTemplate
,\n then don't specify remoteAccess
, or the node group deployment\n will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.
The AMI version of the Amazon EKS optimized AMI to use with your node group.\n By default, the latest available AMI version for the node group's current Kubernetes version\n is used. For information about Linux versions, see Amazon EKS optimized Amazon Linux AMI versions in the Amazon EKS User Guide. Amazon EKS managed node groups support the November 2022 and later releases of the\n Windows AMIs. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the\n Amazon EKS User Guide.
\nIf you specify launchTemplate
, and your launch template uses a custom AMI, then don't specify \n releaseVersion
, or the node group deployment will fail.\n For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.
The AMI version of the Amazon EKS optimized AMI to use with your node group.\n By default, the latest available AMI version for the node group's current Kubernetes\n version is used. For more information, see Amazon EKS optimized Amazon Linux 2 AMI versions in the Amazon EKS User Guide.\n If you specify launchTemplate
, and your launch template uses a custom AMI, then don't specify releaseVersion
,\n or the node group deployment will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.
Delete an Amazon EKS add-on.
\nWhen you remove the add-on, it will also be deleted from the cluster. You can always\n manually start an add-on on the cluster using the Kubernetes API.
", + "smithy.api#documentation": "Delete an Amazon EKS add-on.
\nWhen you remove the add-on, it will also be deleted from the cluster. You can always\n manually start an add-on on the cluster using the Kubernetes API.
", "smithy.api#http": { "method": "DELETE", "uri": "/clusters/{clusterName}/addons/{addonName}", @@ -3988,7 +3964,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes the Amazon EKS cluster control plane.
\nIf you have active services in your cluster that are associated with a load balancer,\n you must delete those services before deleting the cluster so that the load balancers\n are deleted properly. Otherwise, you can have orphaned resources in your VPC that\n prevent you from being able to delete the VPC. For more information, see Deleting a\n Cluster in the Amazon EKS User Guide.
\nIf you have managed node groups or Fargate profiles attached to the\n cluster, you must delete them first. For more information, see DeleteNodegroup and DeleteFargateProfile.
", + "smithy.api#documentation": "Deletes the Amazon EKS cluster control plane.
\nIf you have active services in your cluster that are associated with a load balancer,\n you must delete those services before deleting the cluster so that the load balancers\n are deleted properly. Otherwise, you can have orphaned resources in your VPC that\n prevent you from being able to delete the VPC. For more information, see Deleting a\n Cluster in the Amazon EKS User Guide.
\nIf you have managed node groups or Fargate profiles attached to the\n cluster, you must delete them first. For more information, see DeleteNodegroup and DeleteFargateProfile.
", "smithy.api#http": { "method": "DELETE", "uri": "/clusters/{name}", @@ -4043,7 +4019,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes an Fargate profile.
\nWhen you delete a Fargate profile, any pods running on Fargate that were created with the profile are deleted. If those pods match\n another Fargate profile, then they are scheduled on Fargate with that profile. If they no longer match any Fargate profiles, then\n they are not scheduled on Fargate and they may remain in a pending\n state.
\nOnly one Fargate profile in a cluster can be in the\n DELETING
status at a time. You must wait for a Fargate\n profile to finish deleting before you can delete any other profiles in that\n cluster.
Deletes an Fargate profile.
\nWhen you delete a Fargate profile, any pods running on Fargate that were created with the profile are deleted. If those pods match\n another Fargate profile, then they are scheduled on Fargate with that profile. If they no longer match any Fargate profiles, then\n they are not scheduled on Fargate and they may remain in a pending\n state.
\nOnly one Fargate profile in a cluster can be in the\n DELETING
status at a time. You must wait for a Fargate\n profile to finish deleting before you can delete any other profiles in that\n cluster.
The name of the add-on. The name must match one of the names that \n DescribeAddonVersions
\n returns.
The name of the add-on. The name must match one of the names returned by \n DescribeAddonVersions
\n .
A JSON schema that's used to validate the configuration values that you provide when\n an addon is created or updated.
" + "smithy.api#documentation": "A JSON schema used to validate provided configuration values when creating or updating an addon.
" } } } @@ -4457,7 +4433,7 @@ "nextToken": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "The nextToken
value returned from a previous paginated\n DescribeAddonVersionsRequest
where maxResults
was used and\n the results exceeded the value of that parameter. Pagination continues from the end of\n the previous results that returned the nextToken
value.
This token should be treated as an opaque identifier that is used only to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe nextToken
value returned from a previous paginated\n DescribeAddonVersionsRequest
where maxResults
was used and\n the results exceeded the value of that parameter. Pagination continues from the end of\n the previous results that returned the nextToken
value.
This token should be treated as an opaque identifier that is used only to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe nextToken
value returned from a previous paginated\n DescribeAddonVersionsResponse
where maxResults
was used\n and the results exceeded the value of that parameter. Pagination continues from the end\n of the previous results that returned the nextToken
value.
This token should be treated as an opaque identifier that is used only to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe nextToken
value returned from a previous paginated\n DescribeAddonVersionsResponse
where maxResults
was used\n and the results exceeded the value of that parameter. Pagination continues from the end\n of the previous results that returned the nextToken
value.
This token should be treated as an opaque identifier that is used only to\n retrieve the next items in a list and not for other programmatic purposes.
\nReturns descriptive information about an Amazon EKS cluster.
\nThe API server endpoint and certificate authority data returned by this operation are\n required for kubelet
and kubectl
to communicate with your\n Kubernetes API server. For more information, see Create a\n kubeconfig for Amazon EKS.
The API server endpoint and certificate authority data aren't available until the\n cluster reaches the ACTIVE
state.
Returns descriptive information about an Amazon EKS cluster.
\nThe API server endpoint and certificate authority data returned by this operation are\n required for kubelet
and kubectl
to communicate with your\n Kubernetes API server. For more information, see Create a\n kubeconfig for Amazon EKS.
The API server endpoint and certificate authority data aren't available until the\n cluster reaches the ACTIVE
state.
Returns descriptive information about an update against your Amazon EKS\n cluster or associated managed node group or Amazon EKS add-on.
\nWhen the status of the update is Succeeded
, the update is complete. If an\n update fails, the status is Failed
, and an error detail explains the reason\n for the failure.
Returns descriptive information about an update against your Amazon EKS\n cluster or associated managed node group or Amazon EKS add-on.
\nWhen the status of the update is Succeeded
, the update is complete. If an\n update fails, the status is Failed
, and an error detail explains the reason\n for the failure.
A brief description of the error.
\n\n SubnetNotFound: We couldn't find one of the\n subnets associated with the cluster.
\n\n SecurityGroupNotFound: We couldn't find one\n of the security groups associated with the cluster.
\n\n EniLimitReached: You have reached the elastic\n network interface limit for your account.
\n\n IpNotAvailable: A subnet associated with the\n cluster doesn't have any free IP addresses.
\n\n AccessDenied: You don't have permissions to\n perform the specified operation.
\n\n OperationNotPermitted: The service role\n associated with the cluster doesn't have the required access permissions for\n Amazon EKS.
\n\n VpcIdNotFound: We couldn't find the VPC\n associated with the cluster.
\nA brief description of the error.
\n\n SubnetNotFound: We couldn't find one of the\n subnets associated with the cluster.
\n\n SecurityGroupNotFound: We couldn't find one\n of the security groups associated with the cluster.
\n\n EniLimitReached: You have reached the elastic\n network interface limit for your account.
\n\n IpNotAvailable: A subnet associated with the\n cluster doesn't have any free IP addresses.
\n\n AccessDenied: You don't have permissions to\n perform the specified operation.
\n\n OperationNotPermitted: The service role\n associated with the cluster doesn't have the required access permissions for\n Amazon EKS.
\n\n VpcIdNotFound: We couldn't find the VPC\n associated with the cluster.
\nA brief description of the error.
\n\n AccessDenied: Amazon EKS or one or\n more of your managed nodes is failing to authenticate or authorize with your\n Kubernetes cluster API server.
\n\n AsgInstanceLaunchFailures: Your Auto Scaling group is experiencing failures while attempting to launch\n instances.
\n\n AutoScalingGroupNotFound: We couldn't find\n the Auto Scaling group associated with the managed node group. You may be\n able to recreate an Auto Scaling group with the same settings to\n recover.
\n\n ClusterUnreachable: Amazon EKS or one\n or more of your managed nodes is unable to to communicate with your Kubernetes\n cluster API server. This can happen if there are network disruptions or if API\n servers are timing out processing requests.
\n\n Ec2LaunchTemplateNotFound: We couldn't find\n the Amazon EC2 launch template for your managed node group. You may be\n able to recreate a launch template with the same settings to recover.
\n\n Ec2LaunchTemplateVersionMismatch: The Amazon EC2 launch template version for your managed node group does not\n match the version that Amazon EKS created. You may be able to revert to\n the version that Amazon EKS created to recover.
\n\n Ec2SecurityGroupDeletionFailure: We could not\n delete the remote access security group for your managed node group. Remove any\n dependencies from the security group.
\n\n Ec2SecurityGroupNotFound: We couldn't find\n the cluster security group for the cluster. You must recreate your\n cluster.
\n\n Ec2SubnetInvalidConfiguration: One or more\n Amazon EC2 subnets specified for a node group do not automatically\n assign public IP addresses to instances launched into it. If you want your\n instances to be assigned a public IP address, then you need to enable the\n auto-assign public IP address
setting for the subnet. See\n Modifying\n the public IPv4 addressing attribute for your subnet in the\n Amazon VPC User Guide.
\n IamInstanceProfileNotFound: We couldn't find\n the IAM instance profile for your managed node group. You may be\n able to recreate an instance profile with the same settings to recover.
\n\n IamNodeRoleNotFound: We couldn't find the\n IAM role for your managed node group. You may be able to\n recreate an IAM role with the same settings to recover.
\n\n InstanceLimitExceeded: Your Amazon Web Services account is unable to launch any more instances of the specified instance\n type. You may be able to request an Amazon EC2 instance limit increase\n to recover.
\n\n InsufficientFreeAddresses: One or more of the\n subnets associated with your managed node group does not have enough available\n IP addresses for new nodes.
\n\n InternalFailure: These errors are usually\n caused by an Amazon EKS server-side issue.
\n\n NodeCreationFailure: Your launched instances\n are unable to register with your Amazon EKS cluster. Common causes of\n this failure are insufficient node IAM\n role permissions or lack of outbound internet access for the nodes.\n
\nA brief description of the error.
\n\n AccessDenied: Amazon EKS or one or\n more of your managed nodes is failing to authenticate or authorize with your\n Kubernetes cluster API server.
\n\n AsgInstanceLaunchFailures: Your Auto Scaling group is experiencing failures while attempting to launch\n instances.
\n\n AutoScalingGroupNotFound: We couldn't find\n the Auto Scaling group associated with the managed node group. You may be\n able to recreate an Auto Scaling group with the same settings to\n recover.
\n\n ClusterUnreachable: Amazon EKS or one\n or more of your managed nodes is unable to to communicate with your Kubernetes\n cluster API server. This can happen if there are network disruptions or if API\n servers are timing out processing requests.
\n\n Ec2LaunchTemplateNotFound: We couldn't find\n the Amazon EC2 launch template for your managed node group. You may be\n able to recreate a launch template with the same settings to recover.
\n\n Ec2LaunchTemplateVersionMismatch: The Amazon EC2 launch template version for your managed node group does not\n match the version that Amazon EKS created. You may be able to revert to\n the version that Amazon EKS created to recover.
\n\n Ec2SecurityGroupDeletionFailure: We could not\n delete the remote access security group for your managed node group. Remove any\n dependencies from the security group.
\n\n Ec2SecurityGroupNotFound: We couldn't find\n the cluster security group for the cluster. You must recreate your\n cluster.
\n\n Ec2SubnetInvalidConfiguration: One or more\n Amazon EC2 subnets specified for a node group do not automatically\n assign public IP addresses to instances launched into it. If you want your\n instances to be assigned a public IP address, then you need to enable the\n auto-assign public IP address
setting for the subnet. See\n Modifying\n the public IPv4 addressing attribute for your subnet in the\n Amazon VPC User Guide.
\n IamInstanceProfileNotFound: We couldn't find\n the IAM instance profile for your managed node group. You may be\n able to recreate an instance profile with the same settings to recover.
\n\n IamNodeRoleNotFound: We couldn't find the\n IAM role for your managed node group. You may be able to\n recreate an IAM role with the same settings to recover.
\n\n InstanceLimitExceeded: Your Amazon Web Services account is unable to launch any more instances of the specified instance\n type. You may be able to request an Amazon EC2 instance limit increase\n to recover.
\n\n InsufficientFreeAddresses: One or more of the\n subnets associated with your managed node group does not have enough available\n IP addresses for new nodes.
\n\n InternalFailure: These errors are usually\n caused by an Amazon EKS server-side issue.
\n\n NodeCreationFailure: Your launched instances\n are unable to register with your Amazon EKS cluster. Common causes of\n this failure are insufficient node IAM\n role permissions or lack of outbound internet access for the nodes.\n
\nDon't specify a value if you select ipv6
for ipFamily. The CIDR block to assign Kubernetes service IP addresses from.\n If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16
\n or 172.20.0.0/16
CIDR blocks. We recommend that you specify a block that does not\n overlap with resources in other networks that are peered or connected to your VPC. The\n block must meet the following requirements:
Within one of the following private IP address blocks: 10.0.0.0/8
,\n 172.16.0.0/12
, or 192.168.0.0/16
.
Doesn't overlap with any CIDR block assigned to the VPC that you selected for\n VPC.
\nBetween /24 and /12.
\nYou can only specify a custom CIDR block when you create a cluster and can't\n change this value once the cluster is created.
\nDon't specify a value if you select ipv6
for ipFamily. The CIDR block to assign Kubernetes service IP addresses from.\n If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16
\n or 172.20.0.0/16
CIDR blocks. We recommend that you specify a block that does not\n overlap with resources in other networks that are peered or connected to your VPC. The\n block must meet the following requirements:
Within one of the following private IP address blocks: 10.0.0.0/8
,\n 172.16.0.0/12
, or 192.168.0.0/16
.
Doesn't overlap with any CIDR block assigned to the VPC that you selected for\n VPC.
\nBetween /24 and /12.
\nYou can only specify a custom CIDR block when you create a cluster and can't\n change this value once the cluster is created.
\nSpecify which IP family is used to assign Kubernetes pod and service IP addresses. If\n you don't specify a value, ipv4
is used by default. You can only specify an\n IP family when you create a cluster and can't change this value once the cluster is\n created. If you specify ipv6
, the VPC and subnets that you specify for\n cluster creation must have both IPv4
and IPv6
CIDR blocks assigned to them. You can't\n specify ipv6
for clusters in China Regions.
You can only specify ipv6
for 1.21
and later clusters that use version\n 1.10.1
or later of the Amazon VPC CNI add-on. If you specify ipv6
, then ensure\n that your VPC meets the requirements listed in the considerations listed in Assigning IPv6\n addresses to pods and services in the Amazon EKS User Guide.\n Kubernetes assigns services IPv6
addresses from the unique local address range\n (fc00::/7)
. You can't specify a custom IPv6
CIDR block. Pod addresses are assigned from\n the subnet's IPv6
CIDR.
Specify which IP family is used to assign Kubernetes pod and service IP addresses. If\n you don't specify a value, ipv4
is used by default. You can only specify an\n IP family when you create a cluster and can't change this value once the cluster is\n created. If you specify ipv6
, the VPC and subnets that you specify for\n cluster creation must have both IPv4
and IPv6
CIDR blocks assigned to them. You can't\n specify ipv6
for clusters in China Regions.
You can only specify ipv6
for 1.21
and later clusters that use version\n 1.10.1
or later of the Amazon VPC CNI add-on. If you specify ipv6
, then ensure\n that your VPC meets the requirements listed in the considerations listed in Assigning IPv6\n addresses to pods and services in the Amazon EKS User Guide.\n Kubernetes assigns services IPv6
addresses from the unique local address range\n (fc00::/7)
. You can't specify a custom IPv6
CIDR block. Pod addresses are assigned from\n the subnet's IPv6
CIDR.
The CIDR block that Kubernetes pod and service IP addresses are assigned from. Kubernetes\n assigns addresses from an IPv4 CIDR block assigned to a subnet that the node is in. If\n you didn't specify a CIDR block when you created the cluster, then Kubernetes assigns\n addresses from either the 10.100.0.0/16
or 172.20.0.0/16
CIDR\n blocks. If this was specified, then it was specified when the cluster was created and it\n can't be changed.
The CIDR block that Kubernetes pod and service IP addresses are assigned from.\n Kubernetes assigns addresses from an IPv4 CIDR block assigned to a subnet that the node\n is in. If you didn't specify a CIDR block when you created the cluster, then Kubernetes\n assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks. If this\n was specified, then it was specified when the cluster was created and it can't be\n changed.
" } }, "serviceIpv6Cidr": { @@ -5626,7 +5602,7 @@ "name": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "The name of the launch template.
\nYou must specify either the launch template name or the launch template ID in the\n request, but not both.
" + "smithy.api#documentation": "The name of the launch template.
\nYou must specify either the launch template name or the launch template ID in the\n request, but not both.
" } }, "version": { @@ -5638,12 +5614,12 @@ "id": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "The ID of the launch template.
\nYou must specify either the launch template ID or the launch template name in the\n request, but not both.
" + "smithy.api#documentation": "The ID of the launch template.
\nYou must specify either the launch template ID or the launch template name in the\n request, but not both.
" } } }, "traits": { - "smithy.api#documentation": "An object representing a node group launch template specification. The launch template\n can't include \n SubnetId
\n , \n IamInstanceProfile
\n , \n RequestSpotInstances
\n , \n HibernationOptions
\n , or \n TerminateInstances
\n , or the node group deployment or\n update will fail. For more information about launch templates, see \n CreateLaunchTemplate
\n in the Amazon EC2 API\n Reference. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.
You must specify either the launch template ID or the launch template name in the\n request, but not both.
" + "smithy.api#documentation": "An object representing a node group launch template specification. The launch template\n can't include \n SubnetId
\n , \n IamInstanceProfile
\n , \n RequestSpotInstances
\n , \n HibernationOptions
\n , or \n TerminateInstances
\n , or the node group deployment or\n update will fail. For more information about launch templates, see \n CreateLaunchTemplate
\n in the Amazon EC2 API\n Reference. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.
You must specify either the launch template ID or the launch template name in the\n request, but not both.
" } }, "com.amazonaws.eks#ListAddons": { @@ -5707,7 +5683,7 @@ "nextToken": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "The nextToken
value returned from a previous paginated\n ListAddonsRequest
where maxResults
was used and the\n results exceeded the value of that parameter. Pagination continues from the end of the\n previous results that returned the nextToken
value.
This token should be treated as an opaque identifier that is used only to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe nextToken
value returned from a previous paginated\n ListAddonsRequest
where maxResults
was used and the\n results exceeded the value of that parameter. Pagination continues from the end of the\n previous results that returned the nextToken
value.
This token should be treated as an opaque identifier that is used only to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe nextToken
value returned from a previous paginated\n ListAddonsResponse
where maxResults
was used and the\n results exceeded the value of that parameter. Pagination continues from the end of the\n previous results that returned the nextToken
value.
This token should be treated as an opaque identifier that is used only to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe nextToken
value returned from a previous paginated\n ListAddonsResponse
where maxResults
was used and the\n results exceeded the value of that parameter. Pagination continues from the end of the\n previous results that returned the nextToken
value.
This token should be treated as an opaque identifier that is used only to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe nextToken
value returned from a previous paginated\n ListClusters
request where maxResults
was used and the\n results exceeded the value of that parameter. Pagination continues from the end of the\n previous results that returned the nextToken
value.
This token should be treated as an opaque identifier that is used only to\n retrieve the next items in a list and not for other programmatic purposes.
\nThe nextToken
value returned from a previous paginated\n ListClusters
request where maxResults
was used and the\n results exceeded the value of that parameter. Pagination continues from the end of the\n previous results that returned the nextToken
value.
This token should be treated as an opaque identifier that is used only to\n retrieve the next items in a list and not for other programmatic purposes.
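Illustrative aside (not part of the model change): the opaque nextToken/maxResults contract described above is used as follows; the cluster name is a placeholder.

```python
import boto3

eks = boto3.client("eks")

# Manual pagination: pass nextToken back verbatim; never parse or construct it yourself.
addons, token = [], None
while True:
    kwargs = {"clusterName": "example-cluster", "maxResults": 10}  # placeholder cluster
    if token:
        kwargs["nextToken"] = token
    page = eks.list_addons(**kwargs)
    addons.extend(page["addons"])
    token = page.get("nextToken")
    if not token:
        break
```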
\nThe Kubernetes labels applied to the nodes in the node group.
\nOnly labels that are applied with the Amazon EKS API are shown here. There\n may be other Kubernetes labels applied to the nodes in this group.
\nThe Kubernetes labels applied to the nodes in the node group.
\nOnly labels that are applied with the Amazon EKS API are shown here. There\n may be other Kubernetes labels applied to the nodes in this group.
\nThe current number of nodes that the managed node group should maintain.
\nIf you use Cluster Autoscaler, you shouldn't change the desiredSize value\n directly, as this can cause the Cluster Autoscaler to suddenly scale up or scale\n down.
\nWhenever this parameter changes, the number of worker nodes in the node group is\n updated to the specified size. If this parameter is given a value that is smaller than\n the current number of running worker nodes, the necessary number of worker nodes are\n terminated to match the given value.\n \n When using CloudFormation, no action occurs if you remove this parameter from your CFN\n template.
\nThis parameter can be different from minSize in some cases, such as when starting with\n extra hosts for testing. This parameter can also be different when you want to start\n with an estimated number of needed hosts, but let Cluster Autoscaler reduce the number\n if there are too many. When Cluster Autoscaler is used, the desiredSize parameter is\n altered by Cluster Autoscaler (but can be out-of-date for short periods of time).\n Cluster Autoscaler doesn't scale a managed node group lower than minSize or higher than\n maxSize.
" + "smithy.api#documentation": "The current number of nodes that the managed node group should maintain.
\nIf you use Cluster Autoscaler, you shouldn't change the desiredSize value\n directly, as this can cause the Cluster Autoscaler to suddenly scale up or scale\n down.
\nWhenever this parameter changes, the number of worker nodes in the node group is\n updated to the specified size. If this parameter is given a value that is smaller than\n the current number of running worker nodes, the necessary number of worker nodes are\n terminated to match the given value.\n \n When using CloudFormation, no action occurs if you remove this parameter from your CFN\n template.
\nThis parameter can be different from minSize in some cases, such as when starting with\n extra hosts for testing. This parameter can also be different when you want to start\n with an estimated number of needed hosts, but let Cluster Autoscaler reduce the number\n if there are too many. When Cluster Autoscaler is used, the desiredSize parameter is\n altered by Cluster Autoscaler (but can be out-of-date for short periods of time).\n Cluster Autoscaler doesn't scale a managed node group lower than minSize or higher than\n maxSize.
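A hedged boto3 sketch of adjusting the scaling configuration described above (names are placeholders; as the note says, leave desiredSize alone if Cluster Autoscaler manages the group).

```python
import boto3

eks = boto3.client("eks")

# desiredSize must stay within [minSize, maxSize]; the service then adds or
# terminates worker nodes to converge on the requested count.
eks.update_nodegroup_config(
    clusterName="example-cluster",      # placeholder
    nodegroupName="example-nodegroup",  # placeholder
    scalingConfig={"minSize": 2, "maxSize": 6, "desiredSize": 3},
)
```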
" } } }, @@ -6928,7 +6904,7 @@ "controlPlaneInstanceType": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "The Amazon EC2 instance type that you want to use for your local Amazon EKS cluster on Outposts. Choose an instance type based on the number of nodes\n that your cluster will have. For more information, see Capacity\n considerations in the Amazon EKS User Guide.
\nThe instance type that you specify is used for all Kubernetes control plane instances. The\n instance type can't be changed after cluster creation. The control plane is not\n automatically scaled by Amazon EKS.
\n", + "smithy.api#documentation": "
The Amazon EC2 instance type that you want to use for your local Amazon EKS cluster on Outposts. Choose an instance type based on the number of nodes\n that your cluster will have. For more information, see Capacity\n considerations in the Amazon EKS User Guide.
\nThe instance type that you specify is used for all Kubernetes control plane instances. The\n instance type can't be changed after cluster creation. The control plane is not\n automatically scaled by Amazon EKS.
\n", "smithy.api#required": {} } }, @@ -7029,7 +7005,7 @@ } ], "traits": { - "smithy.api#documentation": "
Connects a Kubernetes cluster to the Amazon EKS control plane.
\nAny Kubernetes cluster can be connected to the Amazon EKS control plane to\n view current information about the cluster and its nodes.
\nCluster connection requires two steps. First, send a \n RegisterClusterRequest\n
to add it to the Amazon EKS\n control plane.
Second, a Manifest containing the activationID
and\n activationCode
must be applied to the Kubernetes cluster through it's\n native provider to provide visibility.
After the Manifest is updated and applied, then the connected cluster is visible to\n the Amazon EKS control plane. If the Manifest is not applied within three days,\n then the connected cluster will no longer be visible and must be deregistered. See DeregisterCluster.
", + "smithy.api#documentation": "Connects a Kubernetes cluster to the Amazon EKS control plane.
\nAny Kubernetes cluster can be connected to the Amazon EKS control plane to\n view current information about the cluster and its nodes.
\nCluster connection requires two steps. First, send a \n RegisterClusterRequest\n
to add it to the Amazon EKS\n control plane.
Second, a Manifest containing the activationID
and\n activationCode
must be applied to the Kubernetes cluster through its\n native provider to provide visibility.
After the Manifest is updated and applied, then the connected cluster is visible to\n the Amazon EKS control plane. If the Manifest is not applied within three days,\n then the connected cluster will no longer be visible and must be deregistered. See DeregisterCluster.
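For orientation only, a boto3 sketch of the two-step connection flow described above; the role ARN and provider value are assumptions, and step two happens outside this API.

```python
import boto3

eks = boto3.client("eks")

# Step 1: register the external cluster with the EKS control plane.
resp = eks.register_cluster(
    name="my-external-cluster",  # placeholder
    connectorConfig={
        "roleArn": "arn:aws:iam::111122223333:role/eksConnectorAgentRole",  # placeholder
        "provider": "OTHER",  # depends on the source cluster (e.g. GKE, AKS, OPENSHIFT)
    },
)

# Step 2: apply the connector manifest (which embeds these values) to the external
# cluster with its native tooling within three days, or register again.
activation_id = resp["cluster"]["connectorConfig"]["activationId"]
activation_code = resp["cluster"]["connectorConfig"]["activationCode"]
```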
", "smithy.api#http": { "method": "POST", "uri": "/cluster-registrations", @@ -7083,13 +7059,13 @@ "ec2SshKey": { "target": "com.amazonaws.eks#String", "traits": { - "smithy.api#documentation": "The Amazon EC2 SSH key name that provides access for SSH communication with\n the nodes in the managed node group. For more information, see Amazon EC2 key pairs and Linux instances in the Amazon Elastic Compute Cloud User Guide for Linux Instances. For\n Windows, an Amazon EC2 SSH key is used to obtain the RDP password. For more\n information, see Amazon EC2 key pairs and Windows instances in\n the Amazon Elastic Compute Cloud User Guide for Windows Instances.
" + "smithy.api#documentation": "The Amazon EC2 SSH key name that provides access for SSH communication with the\n nodes in the managed node group. For more information, see Amazon EC2 key pairs and Linux instances in the Amazon Elastic Compute Cloud User Guide for Linux Instances.
" } }, "sourceSecurityGroups": { "target": "com.amazonaws.eks#StringList", "traits": { - "smithy.api#documentation": "The security group IDs that are allowed SSH access (port 22) to the nodes. For\n Windows, the port is 3389. If you specify an Amazon EC2 SSH key but don't\n specify a source security group when you create a managed node group, then the port on\n the nodes is opened to the internet (0.0.0.0/0
). For more information, see\n Security Groups for Your VPC in the Amazon Virtual Private Cloud User Guide.
The security group ids that are allowed SSH access (port 22) to the nodes. If you specify\n an Amazon EC2 SSH key but do not specify a source security group when you create\n a managed node group, then port 22 on the nodes is opened to the internet (0.0.0.0/0).\n For more information, see Security Groups for Your VPC in the\n Amazon Virtual Private Cloud User Guide.
" } } }, @@ -7615,13 +7591,13 @@ "serviceAccountRoleArn": { "target": "com.amazonaws.eks#RoleArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of an existing IAM role to bind to the add-on's service account. The role must be assigned the IAM permissions required by the add-on. If you don't specify an existing IAM role, then the add-on uses the\n permissions assigned to the node IAM role. For more information, see Amazon EKS node IAM role in the Amazon EKS User Guide.
\nTo specify an existing IAM role, you must have an IAM OpenID Connect (OIDC) provider created for\n your cluster. For more information, see Enabling\n IAM roles for service accounts on your cluster in the\n Amazon EKS User Guide.
\nThe Amazon Resource Name (ARN) of an existing IAM role to bind to the add-on's service account. The role must be assigned the IAM permissions required by the add-on. If you don't specify an existing IAM role, then the add-on uses the\n permissions assigned to the node IAM role. For more information, see Amazon EKS node IAM role in the Amazon EKS User Guide.
\nTo specify an existing IAM role, you must have an IAM OpenID Connect (OIDC) provider created for\n your cluster. For more information, see Enabling\n IAM roles for service accounts on your cluster in the\n Amazon EKS User Guide.
\nHow to resolve field value conflicts for an Amazon EKS add-on if you've\n changed a value from the Amazon EKS default value. Conflicts are handled based\n on the option you choose:
\n\n None – Amazon EKS doesn't\n change the value. The update might fail.
\n\n Overwrite – Amazon EKS\n overwrites the changed value back to the Amazon EKS default\n value.
\n\n Preserve – Amazon EKS\n preserves the value. If you choose this option, we recommend that you test any\n field and value changes on a non-production cluster before updating the add-on\n on your production cluster.
\nHow to resolve field value conflicts for an Amazon EKS add-on if you've\n changed a value from the Amazon EKS default value. Conflicts are handled based\n on the option you choose:
\n\n None – Amazon EKS doesn't\n change the value. The update might fail.
\n\n Overwrite – Amazon EKS\n overwrites the changed value back to the Amazon EKS default\n value.
\n\n Preserve – Amazon EKS\n preserves the value. If you choose this option, we recommend that you test any\n field and value changes on a non-production cluster before updating the add-on\n on your production cluster.
\nThe set of configuration values for the add-on that's created. The values that you\n provide are validated against the schema in DescribeAddonConfiguration.
" + "smithy.api#documentation": "The set of configuration values for the add-on being created. Whatever values provided here are validated against the schema from DescribeAddonConfiguration\n
" } } } @@ -7676,7 +7652,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates an Amazon EKS cluster configuration. Your cluster continues to\n function during the update. The response output includes an update ID that you can use\n to track the status of your cluster update with the DescribeUpdate API\n operation.
\nYou can use this API operation to enable or disable exporting the Kubernetes control\n plane logs for your cluster to CloudWatch Logs. By default, cluster control plane\n logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the\n \n Amazon EKS User Guide\n .
\nCloudWatch Logs ingestion, archive storage, and data scanning rates apply to\n exported control plane logs. For more information, see CloudWatch\n Pricing.
\nYou can also use this API operation to enable or disable public and private access to\n your cluster's Kubernetes API server endpoint. By default, public access is enabled, and\n private access is disabled. For more information, see Amazon EKS cluster endpoint access control in the\n \n Amazon EKS User Guide\n .
\nYou can't update the subnets or security group IDs for an existing cluster.
\nCluster updates are asynchronous, and they should finish within a few minutes. During\n an update, the cluster status moves to UPDATING
(this status transition is\n eventually consistent). When the update is complete (either Failed
or\n Successful
), the cluster status moves to Active
.
Updates an Amazon EKS cluster configuration. Your cluster continues to\n function during the update. The response output includes an update ID that you can use\n to track the status of your cluster update with the DescribeUpdate API\n operation.
\nYou can use this API operation to enable or disable exporting the Kubernetes control\n plane logs for your cluster to CloudWatch Logs. By default, cluster control plane\n logs aren't exported to CloudWatch Logs. For more information, see Amazon EKS Cluster Control Plane Logs in the\n \n Amazon EKS User Guide\n .
\nCloudWatch Logs ingestion, archive storage, and data scanning rates apply to\n exported control plane logs. For more information, see CloudWatch\n Pricing.
\nYou can also use this API operation to enable or disable public and private access to\n your cluster's Kubernetes API server endpoint. By default, public access is enabled, and\n private access is disabled. For more information, see Amazon EKS cluster endpoint access control in the\n \n Amazon EKS User Guide\n .
\nYou can't update the subnets or security group IDs for an existing cluster.
\nCluster updates are asynchronous, and they should finish within a few minutes. During\n an update, the cluster status moves to UPDATING
(this status transition is\n eventually consistent). When the update is complete (either Failed
or\n Successful
), the cluster status moves to Active
.
Enable or disable exporting the Kubernetes control plane logs for your cluster to\n CloudWatch Logs. By default, cluster control plane logs aren't exported to\n CloudWatch Logs. For more information, see Amazon EKS cluster control plane logs in the\n \n Amazon EKS User Guide\n .
\nCloudWatch Logs ingestion, archive storage, and data scanning rates apply to\n exported control plane logs. For more information, see CloudWatch\n Pricing.
\nEnable or disable exporting the Kubernetes control plane logs for your cluster to\n CloudWatch Logs. By default, cluster control plane logs aren't exported to\n CloudWatch Logs. For more information, see Amazon EKS cluster control plane logs in the\n \n Amazon EKS User Guide\n .
\nCloudWatch Logs ingestion, archive storage, and data scanning rates apply to\n exported control plane logs. For more information, see CloudWatch\n Pricing.
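Illustrative boto3 sketch of the control plane log export toggle described above (cluster name and chosen log types are placeholders; CloudWatch charges apply as noted).

```python
import boto3

eks = boto3.client("eks")

# Enable a subset of control plane log types; disable the rest explicitly.
eks.update_cluster_config(
    name="example-cluster",  # placeholder
    logging={
        "clusterLogging": [
            {"types": ["api", "audit", "authenticator"], "enabled": True},
            {"types": ["controllerManager", "scheduler"], "enabled": False},
        ]
    },
)
```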
\nUpdates an Amazon EKS cluster to the specified Kubernetes version. Your\n cluster continues to function during the update. The response output includes an update\n ID that you can use to track the status of your cluster update with the DescribeUpdate API operation.
\nCluster updates are asynchronous, and they should finish within a few minutes. During\n an update, the cluster status moves to UPDATING
(this status transition is\n eventually consistent). When the update is complete (either Failed
or\n Successful
), the cluster status moves to Active
.
If your cluster has managed node groups attached to it, all of your node groups’\n Kubernetes versions must match the cluster’s Kubernetes version in order to update the\n cluster to a new Kubernetes version.
", + "smithy.api#documentation": "Updates an Amazon EKS cluster to the specified Kubernetes version. Your\n cluster continues to function during the update. The response output includes an update\n ID that you can use to track the status of your cluster update with the DescribeUpdate API operation.
\nCluster updates are asynchronous, and they should finish within a few minutes. During\n an update, the cluster status moves to UPDATING
(this status transition is\n eventually consistent). When the update is complete (either Failed
or\n Successful
), the cluster status moves to Active
.
If your cluster has managed node groups attached to it, all of your node groups’\n Kubernetes versions must match the cluster’s Kubernetes version in order to update the\n cluster to a new Kubernetes version.
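A short, hedged sketch of the version update call described above (boto3; names and versions are placeholders), including polling the returned update ID.

```python
import boto3

eks = boto3.client("eks")

update = eks.update_cluster_version(
    name="example-cluster",  # placeholder
    version="1.27",          # placeholder; node groups must already match the current version
)

# The update is asynchronous; track it with DescribeUpdate.
status = eks.describe_update(
    name="example-cluster",
    updateId=update["update"]["id"],
)["update"]["status"]  # InProgress | Failed | Cancelled | Successful
```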
", "smithy.api#http": { "method": "POST", "uri": "/clusters/{name}/updates", @@ -7942,7 +7918,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates the Kubernetes version or AMI version of an Amazon EKS managed node\n group.
\nYou can update a node group using a launch template only if the node group was\n originally deployed with a launch template. If you need to update a custom AMI in a node\n group that was deployed with a launch template, then update your custom AMI, specify the\n new ID in a new version of the launch template, and then update the node group to the\n new version of the launch template.
\nIf you update without a launch template, then you can update to the latest available\n AMI version of a node group's current Kubernetes version by not specifying a Kubernetes version in\n the request. You can update to the latest AMI version of your cluster's current Kubernetes\n version by specifying your cluster's Kubernetes version in the request. For information about\n Linux versions, see Amazon EKS optimized Amazon Linux AMI versions in the\n Amazon EKS User Guide. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the\n Amazon EKS User Guide.
\nYou cannot roll back a node group to an earlier Kubernetes version or AMI\n version.
\nWhen a node in a managed node group is terminated due to a scaling action or update,\n the pods in that node are drained first. Amazon EKS attempts to drain the nodes\n gracefully and will fail if it is unable to do so. You can force
the update\n if Amazon EKS is unable to drain the nodes as a result of a pod disruption\n budget issue.
Updates the Kubernetes version or AMI version of an Amazon EKS managed node\n group.
\nYou can update a node group using a launch template only if the node group was\n originally deployed with a launch template. If you need to update a custom AMI in a node\n group that was deployed with a launch template, then update your custom AMI, specify the\n new ID in a new version of the launch template, and then update the node group to the\n new version of the launch template.
\nIf you update without a launch template, then you can update to the latest available\n AMI version of a node group's current Kubernetes version by not specifying a Kubernetes\n version in the request. You can update to the latest AMI version of your cluster's\n current Kubernetes version by specifying your cluster's Kubernetes version in the\n request. For more information, see Amazon EKS optimized Amazon Linux 2 AMI versions in the Amazon EKS User Guide.
\nYou cannot roll back a node group to an earlier Kubernetes version or AMI\n version.
\nWhen a node in a managed node group is terminated due to a scaling action or update,\n the pods in that node are drained first. Amazon EKS attempts to drain the nodes\n gracefully and will fail if it is unable to do so. You can force
the update\n if Amazon EKS is unable to drain the nodes as a result of a pod disruption\n budget issue.
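As an aside (not part of the model change), the two update paths described above look roughly like this in boto3; cluster/nodegroup names, versions, and the launch template ID are placeholders.

```python
import boto3

eks = boto3.client("eks")

# Nodegroup created without a launch template: update to the latest AMI for the
# given Kubernetes version. Use force=True only if pod disruption budgets block draining.
eks.update_nodegroup_version(
    clusterName="example-cluster",      # placeholder
    nodegroupName="example-nodegroup",  # placeholder
    version="1.27",                     # placeholder Kubernetes version
    force=False,
)

# Nodegroup created from a launch template: point at a new template version that
# references the updated custom AMI, and omit version/releaseVersion.
eks.update_nodegroup_version(
    clusterName="example-cluster",
    nodegroupName="lt-based-nodegroup",                            # placeholder
    launchTemplate={"id": "lt-0123456789abcdef0", "version": "2"}, # placeholder
)
```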
The AMI version of the Amazon EKS optimized AMI to use for the update. By\n default, the latest available AMI version for the node group's Kubernetes version is used.\n For information about Linux versions, see Amazon EKS optimized Amazon Linux AMI versions in the Amazon EKS User Guide. Amazon EKS managed node groups support the November 2022 and later releases of the\n Windows AMIs. For information about Windows versions, see Amazon EKS optimized Windows AMI versions in the\n Amazon EKS User Guide.
\nIf you specify launchTemplate
, and your launch template uses a custom AMI, then don't specify \n releaseVersion
, or the node group update will fail.\n For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.
The AMI version of the Amazon EKS optimized AMI to use for the update. By\n default, the latest available AMI version for the node group's Kubernetes version is\n used. For more information, see Amazon EKS optimized Amazon Linux 2 AMI versions in the Amazon EKS User Guide.\n If you specify launchTemplate
, and your launch template uses a custom AMI, then don't specify releaseVersion
,\n or the node group update will fail. For more information about using launch templates with Amazon EKS, see Launch template support in the Amazon EKS User Guide.
A list of errors that can occur when registering partition indexes for an existing table.
\nThese errors give the details about why an index registration failed and provide a limited number of partitions in the response, so that you can fix the partitions at fault and try registering the index again. The most common set of errors that can occur are categorized as follows:
\nEncryptedPartitionError: The partitions are encrypted.
\nInvalidPartitionTypeDataError: The partition value doesn't match the data type for that partition column.
\nMissingPartitionValueError: The partitions are encrypted.
\nUnsupportedPartitionCharacterError: Characters inside the partition value are not supported. For example: U+0000 , U+0001, U+0002.
\nInternalError: Any error which does not belong to other error codes.
\nA list of errors that can occur when registering partition indexes for an existing table.
\n\t\n\tThese errors give the details about why an index registration failed and provide a limited number of partitions in the response, so that you can fix the partitions at fault and try registering the index again. The most common set of errors that can occur are categorized as follows:
\n\t \n\tEncryptedPartitionError: The partitions are encrypted.
\nInvalidPartitionTypeDataError: The partition value doesn't match the data type for that partition column.
\nMissingPartitionValueError: The partitions are encrypted.
\nUnsupportedPartitionCharacterError: Characters inside the partition value are not supported. For example: U+0000 , U+0001, U+0002.
\nInternalError: Any error which does not belong to other error codes.
\nThe status of the blueprint registration.
\nCreating — The blueprint registration is in progress.
\nActive — The blueprint has been successfully registered.
\nUpdating — An update to the blueprint registration is in progress.
\nFailed — The blueprint registration failed.
\nThe status of the blueprint registration.
\n\t\n\tCreating — The blueprint registration is in progress.
\nActive — The blueprint has been successfully registered.
\nUpdating — An update to the blueprint registration is in progress.
\nFailed — The blueprint registration failed.
\nThe state of the blueprint run. Possible values are:
\nRunning — The blueprint run is in progress.
\nSucceeded — The blueprint run completed successfully.
\nFailed — The blueprint run failed and rollback is complete.
\nRolling Back — The blueprint run failed and rollback is in progress.
\nThe state of the blueprint run. Possible values are:
\n\t\n\tRunning — The blueprint run is in progress.
\nSucceeded — The blueprint run completed successfully.
\nFailed — The blueprint run failed and rollback is complete.
\nRolling Back — The blueprint run failed and rollback is in progress.
\nA structure containing the column name and column importance score for a column.
\nColumn importance helps you understand how columns contribute to your model, by identifying which columns in your records are more important than others.
" + "smithy.api#documentation": "A structure containing the column name and column importance score for a column.
\n\t\n\tColumn importance helps you understand how columns contribute to your model, by identifying which columns in your records are more important than others.
" } }, "com.amazonaws.glue#ColumnImportanceList": { @@ -6341,7 +6350,7 @@ } }, "traits": { - "smithy.api#documentation": "The confusion matrix shows you what your transform is predicting accurately and what types of errors it is making.
\nFor more information, see Confusion matrix in Wikipedia.
" + "smithy.api#documentation": "The confusion matrix shows you what your transform is predicting accurately and what types of errors it is making.
\n\t\n\tFor more information, see Confusion matrix in Wikipedia.
" } }, "com.amazonaws.glue#Connection": { @@ -6425,7 +6434,7 @@ "ConnectionType": { "target": "com.amazonaws.glue#ConnectionType", "traits": { - "smithy.api#documentation": "The type of the connection. Currently, these types are supported:
\n\n JDBC
- Designates a connection to a database through Java Database Connectivity (JDBC).
\n KAFKA
- Designates a connection to an Apache Kafka streaming platform.
\n MONGODB
- Designates a connection to a MongoDB document database.
\n NETWORK
- Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).
\n MARKETPLACE
- Uses configuration settings contained in a connector purchased from Amazon Web Services Marketplace to read from and write to data stores that are not natively supported by Glue.
\n CUSTOM
- Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by Glue.
SFTP is not supported.
", + "smithy.api#documentation": "The type of the connection. Currently, these types are supported:
\n\t\n JDBC
- Designates a connection to a database through Java Database Connectivity (JDBC).
\n KAFKA
- Designates a connection to an Apache Kafka streaming platform.
\n MONGODB
- Designates a connection to a MongoDB document database.
\n NETWORK
- Designates a network connection to a data source within an Amazon Virtual Private Cloud environment (Amazon VPC).
\n MARKETPLACE
- Uses configuration settings contained in a connector purchased from Amazon Web Services Marketplace to read from and write to data stores that are not natively supported by Glue.
\n CUSTOM
- Uses configuration settings contained in a custom connector to read from and write to data stores that are not natively supported by Glue.
SFTP is not supported.
", "smithy.api#required": {} } }, @@ -6476,12 +6485,12 @@ "AwsKmsKeyId": { "target": "com.amazonaws.glue#NameString", "traits": { - "smithy.api#documentation": "An KMS key that is used to encrypt the connection password.
\nIf connection password protection is enabled, the caller of CreateConnection
\n and UpdateConnection
needs at least kms:Encrypt
permission on the\n specified KMS key, to encrypt passwords before storing them in the Data Catalog.
You can set the decrypt permission to enable or restrict access on the password key according to your security requirements.
" + "smithy.api#documentation": "An KMS key that is used to encrypt the connection password.
\n\t\nIf connection password protection is enabled, the caller of CreateConnection
\n and UpdateConnection
needs at least kms:Encrypt
permission on the\n specified KMS key, to encrypt passwords before storing them in the Data Catalog.
You can set the decrypt permission to enable or restrict access on the password key according to your security requirements.
" } } }, "traits": { - "smithy.api#documentation": "The data structure used by the Data Catalog to encrypt the password as part of\n CreateConnection
or UpdateConnection
and store it in the\n ENCRYPTED_PASSWORD
field in the connection properties. You can enable catalog\n encryption or only password encryption.
When a CreationConnection
request arrives containing a password, the Data\n Catalog first encrypts the password using your KMS key. It then encrypts the whole\n connection object again if catalog encryption is also enabled.
This encryption requires that you set KMS key permissions to enable or restrict access\n on the password key according to your security requirements. For example, you might want only\n administrators to have decrypt permission on the password key.
" + "smithy.api#documentation": "The data structure used by the Data Catalog to encrypt the password as part of\n CreateConnection
or UpdateConnection
and store it in the\n ENCRYPTED_PASSWORD
field in the connection properties. You can enable catalog\n encryption or only password encryption.
When a CreationConnection
request arrives containing a password, the Data\n Catalog first encrypts the password using your KMS key. It then encrypts the whole\n connection object again if catalog encryption is also enabled.
This encryption requires that you set KMS key permissions to enable or restrict access\n on the password key according to your security requirements. For example, you might want only\n administrators to have decrypt permission on the password key.
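Outside the diff itself, the catalog-level setting this structure feeds can be sketched with boto3 as follows; the account ID and KMS key alias are placeholders.

```python
import boto3

glue = boto3.client("glue")

# Turn on connection password encryption with a customer-managed KMS key.
# Callers of CreateConnection/UpdateConnection then need kms:Encrypt on this key.
glue.put_data_catalog_encryption_settings(
    CatalogId="111122223333",  # placeholder account ID
    DataCatalogEncryptionSettings={
        "ConnectionPasswordEncryption": {
            "ReturnConnectionPasswordEncrypted": True,
            "AwsKmsKeyId": "alias/example-glue-connections",  # placeholder
        }
    },
)
```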
" } }, "com.amazonaws.glue#ConnectionProperties": { @@ -7333,13 +7342,13 @@ "FieldName": { "target": "com.amazonaws.glue#FieldName", "traits": { - "smithy.api#documentation": "A key used to filter the crawler runs for a specified crawler. Valid values for each of the field names are:
\n\n CRAWL_ID
: A string representing the UUID identifier for a crawl.
\n STATE
: A string representing the state of the crawl.
\n START_TIME
and END_TIME
: The epoch timestamp in milliseconds.
\n DPU_HOUR
: The number of data processing unit (DPU) hours used for the crawl.
A key used to filter the crawler runs for a specified crawler. Valid values for each of the field names are:
\n\t\n CRAWL_ID
: A string representing the UUID identifier for a crawl.
\n STATE
: A string representing the state of the crawl.
\n START_TIME
and END_TIME
: The epoch timestamp in milliseconds.
\n DPU_HOUR
: The number of data processing unit (DPU) hours used for the crawl.
A defined comparator that operates on the value. The available operators are:
\n\n GT
: Greater than.
\n GE
: Greater than or equal to.
\n LT
: Less than.
\n LE
: Less than or equal to.
\n EQ
: Equal to.
\n NE
: Not equal to.
A defined comparator that operates on the value. The available operators are:
\n\t\n GT
: Greater than.
\n GE
: Greater than or equal to.
\n LT
: Less than.
\n LE
: Less than or equal to.
\n EQ
: Equal to.
\n NE
: Not equal to.
Creates a custom pattern that is used to detect sensitive data across the columns and rows of your structured data.
\nEach custom pattern you create specifies a regular expression and an optional list of context words. If no context words are passed only a regular expression is checked.
" + "smithy.api#documentation": "Creates a custom pattern that is used to detect sensitive data across the columns and rows of your structured data.
\n\t\n\tEach custom pattern you create specifies a regular expression and an optional list of context words. If no context words are passed only a regular expression is checked.
" } }, "com.amazonaws.glue#CreateCustomEntityTypeRequest": { @@ -7789,7 +7798,7 @@ "ContextWords": { "target": "com.amazonaws.glue#ContextWords", "traits": { - "smithy.api#documentation": "A list of context words. If none of these context words are found within the vicinity of the regular expression the data will not be detected as sensitive data.
\nIf no context words are passed only a regular expression is checked.
" + "smithy.api#documentation": "A list of context words. If none of these context words are found within the vicinity of the regular expression the data will not be detected as sensitive data.
\n\t\n\tIf no context words are passed only a regular expression is checked.
" } } } @@ -7831,7 +7840,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a data quality ruleset with DQDL rules applied to a specified Glue table.
\nYou create the ruleset using the Data Quality Definition Language (DQDL). For more information, see the Glue developer guide.
", + "smithy.api#documentation": "Creates a data quality ruleset with DQDL rules applied to a specified Glue table.
\n\t\n\tYou create the ruleset using the Data Quality Definition Language (DQDL). For more information, see the Glue developer guide.
", "smithy.api#idempotent": {} } }, @@ -8028,7 +8037,7 @@ "PublicKeys": { "target": "com.amazonaws.glue#PublicKeysList", "traits": { - "smithy.api#documentation": "A list of public keys to be used by the development endpoints for authentication. The use\n of this attribute is preferred over a single public key because the public keys allow you to\n have a different private key per client.
\nIf you previously created an endpoint with a public key, you must remove that key to be able\n to set a list of public keys. Call the UpdateDevEndpoint
API with the public\n key content in the deletePublicKeys
attribute, and the list of new keys in the\n addPublicKeys
attribute.
A list of public keys to be used by the development endpoints for authentication. The use\n of this attribute is preferred over a single public key because the public keys allow you to\n have a different private key per client.
\n \nIf you previously created an endpoint with a public key, you must remove that key to be able\n to set a list of public keys. Call the UpdateDevEndpoint
API with the public\n key content in the deletePublicKeys
attribute, and the list of new keys in the\n addPublicKeys
attribute.
The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.
\nFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
Known issue: when a development endpoint is created with the G.2X
\n WorkerType
configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.
The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.
\n\tFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
Known issue: when a development endpoint is created with the G.2X
\n WorkerType
configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.
Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.
\nFor more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
\nDevelopment endpoints that are created without specifying a Glue version default to Glue 0.9.
\nYou can specify a version of Python support for development endpoints by using the Arguments
parameter in the CreateDevEndpoint
or UpdateDevEndpoint
APIs. If no arguments are provided, the version defaults to Python 2.
Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.
\n \nFor more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
\n\t \n\tDevelopment endpoints that are created without specifying a Glue version default to Glue 0.9.
\n\t\n\tYou can specify a version of Python support for development endpoints by using the Arguments
parameter in the CreateDevEndpoint
or UpdateDevEndpoint
APIs. If no arguments are provided, the version defaults to Python 2.
The number of workers of a defined workerType
that are allocated to the development endpoint.
The maximum number of workers you can define are 299 for G.1X
, and 149 for G.2X
.
The number of workers of a defined workerType
that are allocated to the development endpoint.
The maximum number of workers you can define are 299 for G.1X
, and 149 for G.2X
.
Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.
\nFor more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
" + "smithy.api#documentation": "Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.
\n \nFor more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
" } }, "NumberOfWorkers": { @@ -8204,7 +8213,7 @@ "Arguments": { "target": "com.amazonaws.glue#MapValue", "traits": { - "smithy.api#documentation": "The map of arguments used to configure this DevEndpoint
.
Valid arguments are:
\n\n \"--enable-glue-datacatalog\": \"\"
\n
You can specify a version of Python support for development endpoints by using the Arguments
parameter in the CreateDevEndpoint
or UpdateDevEndpoint
APIs. If no arguments are provided, the version defaults to Python 2.
The map of arguments used to configure this DevEndpoint
.
Valid arguments are:
\n\t\n \"--enable-glue-datacatalog\": \"\"
\n
You can specify a version of Python support for development endpoints by using the Arguments
parameter in the CreateDevEndpoint
or UpdateDevEndpoint
APIs. If no arguments are provided, the version defaults to Python 2.
The default arguments for this job.
\nYou can specify arguments here that your own job-execution script\n consumes, as well as arguments that Glue itself consumes.
\nJob arguments may be logged. Do not pass plaintext secrets as arguments.\n Retrieve secrets from a Glue Connection, Secrets Manager or\n other secret management mechanism if you intend to keep them within the Job.\n
\nFor information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide.
\nFor information about the key-value pairs that Glue consumes to set up your job, see the Special Parameters Used by Glue topic in the developer guide.
" + "smithy.api#documentation": "The default arguments for this job.
\nYou can specify arguments here that your own job-execution script\n consumes, as well as arguments that Glue itself consumes.
\nJob arguments may be logged. Do not pass plaintext secrets as arguments.\n Retrieve secrets from a Glue Connection, Secrets Manager or\n other secret management mechanism if you intend to keep them within the Job.\n
\nFor information about how to specify and consume your own Job arguments, see the Calling Glue APIs in Python topic in the developer guide.
\n\t \nFor information about the key-value pairs that Glue consumes to set up your job, see the Special Parameters Used by Glue topic in the developer guide.
" } }, "NonOverridableArguments": { @@ -8353,7 +8362,7 @@ "smithy.api#deprecated": { "message": "This property is deprecated, use MaxCapacity instead." }, - "smithy.api#documentation": "This parameter is deprecated. Use MaxCapacity
instead.
The number of Glue data processing units (DPUs) to allocate to this Job. You can\n allocate a minimum of 2 DPUs; the default is 10. A DPU is a relative measure of processing\n power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information,\n see the Glue pricing\n page.
" + "smithy.api#documentation": "This parameter is deprecated. Use MaxCapacity
instead.
The number of Glue data processing units (DPUs) to allocate to this Job. You can\n allocate a minimum of 2 DPUs; the default is 10. A DPU is a relative measure of processing\n power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information,\n see the Glue pricing\n page.
" } }, "Timeout": { @@ -8365,7 +8374,7 @@ "MaxCapacity": { "target": "com.amazonaws.glue#NullableDouble", "traits": { - "smithy.api#documentation": "For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure\n of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory.\n For more information, see the Glue\n pricing page.
\nDo not set Max Capacity
if using WorkerType
and NumberOfWorkers
.
The value that can be allocated for MaxCapacity
depends on whether you are\n running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name
=\"pythonshell\"), you can\n allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name
=\"glueetl\") or Apache \n Spark streaming ETL job (JobCommand.Name
=\"gluestreaming\"), you can allocate a minimum of 2 DPUs. \n The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
For Glue version 2.0 jobs, you cannot instead specify a Maximum capacity
. Instead, you should specify a Worker type
and the Number of workers
.
For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure\n of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory.\n For more information, see the Glue\n pricing page.
\n\t\t \n\tDo not set Max Capacity
if using WorkerType
and NumberOfWorkers
.
The value that can be allocated for MaxCapacity
depends on whether you are\n running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name
=\"pythonshell\"), you can\n allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name
=\"glueetl\") or Apache \n Spark streaming ETL job (JobCommand.Name
=\"gluestreaming\"), you can allocate a minimum of 2 DPUs. \n The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
For Glue version 2.0 jobs, you cannot instead specify a Maximum capacity
. Instead, you should specify a Worker type
and the Number of workers
.
Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark.
\nFor more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
\nJobs that are created without specifying a Glue version default to Glue 0.9.
" + "smithy.api#documentation": "Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark.
\n \nFor more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
\n\t \n\tJobs that are created without specifying a Glue version default to Glue 0.9.
" } }, "NumberOfWorkers": { @@ -8401,7 +8410,7 @@ "WorkerType": { "target": "com.amazonaws.glue#WorkerType", "traits": { - "smithy.api#documentation": "The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X.
\nFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X.
\n\tFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is ideal for time-sensitive workloads that require fast job startup and dedicated resources.
\nThe flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary.
\nOnly jobs with Glue version 3.0 and above and command type glueetl
will be allowed to set ExecutionClass
to FLEX
. The flexible execution class is available for Spark jobs.
Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is ideal for time-sensitive workloads that require fast job startup and dedicated resources.
\n\nThe flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary.
\n\t\n\tOnly jobs with Glue version 3.0 and above and command type glueetl
will be allowed to set ExecutionClass
to FLEX
. The flexible execution class is available for Spark jobs.
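To ground the worker-type, capacity, and execution-class rules above, a hedged boto3 sketch of CreateJob; the role, script path, and sizing are placeholders, and WorkerType/NumberOfWorkers remain mutually exclusive with MaxCapacity as documented.

```python
import boto3

glue = boto3.client("glue")

glue.create_job(
    Name="example-etl-job",  # placeholder
    Role="arn:aws:iam::111122223333:role/ExampleGlueRole",  # placeholder
    Command={
        "Name": "glueetl",
        "ScriptLocation": "s3://example-bucket/scripts/job.py",  # placeholder
        "PythonVersion": "3",
    },
    GlueVersion="3.0",
    WorkerType="G.1X",      # with WorkerType/NumberOfWorkers, do not also set MaxCapacity
    NumberOfWorkers=10,
    ExecutionClass="FLEX",  # Glue 3.0+ glueetl jobs only; use STANDARD otherwise
    DefaultArguments={"--TempDir": "s3://example-bucket/tmp/"},  # placeholder
)
```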
Creates an Glue machine learning transform. This operation creates the transform and\n all the necessary parameters to train it.
\nCall this operation as the first step in the process of using a machine learning transform\n (such as the FindMatches
transform) for deduplicating data. You can provide an\n optional Description
, in addition to the parameters that you want to use for your\n algorithm.
You must also specify certain parameters for the tasks that Glue runs on your\n behalf as part of learning from your data and creating a high-quality machine learning\n transform. These parameters include Role
, and optionally,\n AllocatedCapacity
, Timeout
, and MaxRetries
. For more\n information, see Jobs.
Creates an Glue machine learning transform. This operation creates the transform and\n all the necessary parameters to train it.
\n\t\n\tCall this operation as the first step in the process of using a machine learning transform\n (such as the FindMatches
transform) for deduplicating data. You can provide an\n optional Description
, in addition to the parameters that you want to use for your\n algorithm.
You must also specify certain parameters for the tasks that Glue runs on your\n behalf as part of learning from your data and creating a high-quality machine learning\n transform. These parameters include Role
, and optionally,\n AllocatedCapacity
, Timeout
, and MaxRetries
. For more\n information, see Jobs.
The name or Amazon Resource Name (ARN) of the IAM role with the required permissions. The required permissions include both Glue service role permissions to Glue resources, and Amazon S3 permissions required by the transform.
\nThis role needs Glue service role permissions to allow access to resources in Glue. See Attach a Policy to IAM Users That Access Glue.
\nThis role needs permission to your Amazon Simple Storage Service (Amazon S3) sources, targets, temporary directory, scripts, and any libraries used by the task run for this transform.
\nThe name or Amazon Resource Name (ARN) of the IAM role with the required permissions. The required permissions include both Glue service role permissions to Glue resources, and Amazon S3 permissions required by the transform.
\n\n\t\tThis role needs Glue service role permissions to allow access to resources in Glue. See Attach a Policy to IAM Users That Access Glue.
\nThis role needs permission to your Amazon Simple Storage Service (Amazon S3) sources, targets, temporary directory, scripts, and any libraries used by the task run for this transform.
\nThe number of Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of\n processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more\n information, see the Glue pricing\n page.
\n\n MaxCapacity
is a mutually exclusive option with NumberOfWorkers
and WorkerType
.
If either NumberOfWorkers
or WorkerType
is set, then MaxCapacity
cannot be set.
If MaxCapacity
is set then neither NumberOfWorkers
or WorkerType
can be set.
If WorkerType
is set, then NumberOfWorkers
is required (and vice versa).
\n MaxCapacity
and NumberOfWorkers
must both be at least 1.
When the WorkerType
field is set to a value other than Standard
, the MaxCapacity
field is set automatically and becomes read-only.
When the WorkerType
field is set to a value other than Standard
, the MaxCapacity
field is set automatically and becomes read-only.
The number of Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of\n processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more\n information, see the Glue pricing\n page.
\n\t\t\n\t\t\t\n MaxCapacity
is a mutually exclusive option with NumberOfWorkers
and WorkerType
.
If either NumberOfWorkers
or WorkerType
is set, then MaxCapacity
cannot be set.
If MaxCapacity
is set then neither NumberOfWorkers
or WorkerType
can be set.
If WorkerType
is set, then NumberOfWorkers
is required (and vice versa).
\n MaxCapacity
and NumberOfWorkers
must both be at least 1.
When the WorkerType
field is set to a value other than Standard
, the MaxCapacity
field is set automatically and becomes read-only.
When the WorkerType
field is set to a value other than Standard
, the MaxCapacity
field is set automatically and becomes read-only.
The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.
\nFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X
worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
\n MaxCapacity
is a mutually exclusive option with NumberOfWorkers
and WorkerType
.
If either NumberOfWorkers
or WorkerType
is set, then MaxCapacity
cannot be set.
If MaxCapacity
is set then neither NumberOfWorkers
or WorkerType
can be set.
If WorkerType
is set, then NumberOfWorkers
is required (and vice versa).
\n MaxCapacity
and NumberOfWorkers
must both be at least 1.
The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.
\n\tFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X
worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
\n MaxCapacity
is a mutually exclusive option with NumberOfWorkers
and WorkerType
.
If either NumberOfWorkers
or WorkerType
is set, then MaxCapacity
cannot be set.
If MaxCapacity
is set then neither NumberOfWorkers
or WorkerType
can be set.
If WorkerType
is set, then NumberOfWorkers
is required (and vice versa).
\n MaxCapacity
and NumberOfWorkers
must both be at least 1.
The number of workers of a defined workerType
that are allocated when this task runs.
If WorkerType
is set, then NumberOfWorkers
is required (and vice versa).
The number of workers of a defined workerType
that are allocated when this task runs.
If WorkerType
is set, then NumberOfWorkers
is required (and vice versa).
The compatibility mode of the schema. The possible values are:
\n\n NONE: No compatibility mode applies. You can use this choice in development scenarios or if you do not know the compatibility mode that you want to apply to schemas. Any new version added will be accepted without undergoing a compatibility check.
\n\n DISABLED: This compatibility choice prevents versioning for a particular schema. You can use this choice to prevent future versioning of a schema.
\n\n BACKWARD: This compatibility choice is recommended as it allows data receivers to read both the current and one previous schema version. This means that for instance, a new schema version cannot drop data fields or change the type of these fields, so they can't be read by readers using the previous version.
\n\n BACKWARD_ALL: This compatibility choice allows data receivers to read both the current and all previous schema versions. You can use this choice when you need to delete fields or add optional fields, and check compatibility against all previous schema versions.
\n\n FORWARD: This compatibility choice allows data receivers to read both the current and one next schema version, but not necessarily later versions. You can use this choice when you need to add fields or delete optional fields, but only check compatibility against the last schema version.
\n\n FORWARD_ALL: This compatibility choice allows data receivers to read written by producers of any new registered schema. You can use this choice when you need to add fields or delete optional fields, and check compatibility against all previous schema versions.
\n\n FULL: This compatibility choice allows data receivers to read data written by producers using the previous or next version of the schema, but not necessarily earlier or later versions. You can use this choice when you need to add or remove optional fields, but only check compatibility against the last schema version.
\n\n FULL_ALL: This compatibility choice allows data receivers to read data written by producers using all previous schema versions. You can use this choice when you need to add or remove optional fields, and check compatibility against all previous schema versions.
\nThe compatibility mode of the schema. The possible values are:
\n\t\n\n NONE: No compatibility mode applies. You can use this choice in development scenarios or if you do not know the compatibility mode that you want to apply to schemas. Any new version added will be accepted without undergoing a compatibility check.
\n\n DISABLED: This compatibility choice prevents versioning for a particular schema. You can use this choice to prevent future versioning of a schema.
\n\n BACKWARD: This compatibility choice is recommended as it allows data receivers to read both the current and one previous schema version. This means that for instance, a new schema version cannot drop data fields or change the type of these fields, so they can't be read by readers using the previous version.
\n\n BACKWARD_ALL: This compatibility choice allows data receivers to read both the current and all previous schema versions. You can use this choice when you need to delete fields or add optional fields, and check compatibility against all previous schema versions.
\n\n FORWARD: This compatibility choice allows data receivers to read both the current and one next schema version, but not necessarily later versions. You can use this choice when you need to add fields or delete optional fields, but only check compatibility against the last schema version.
\n\n FORWARD_ALL: This compatibility choice allows data receivers to read written by producers of any new registered schema. You can use this choice when you need to add fields or delete optional fields, and check compatibility against all previous schema versions.
\n\n FULL: This compatibility choice allows data receivers to read data written by producers using the previous or next version of the schema, but not necessarily earlier or later versions. You can use this choice when you need to add or remove optional fields, but only check compatibility against the last schema version.
\n\n FULL_ALL: This compatibility choice allows data receivers to read data written by producers using all previous schema versions. You can use this choice when you need to add or remove optional fields, and check compatibility against all previous schema versions.
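For context on the compatibility modes listed above, a small boto3 sketch of registering a schema with BACKWARD checking; the registry name and Avro definition are placeholders.

```python
import boto3
import json

glue = boto3.client("glue")

avro_schema = {
    "type": "record",
    "name": "ExampleRecord",
    "fields": [{"name": "id", "type": "long"}],
}

glue.create_schema(
    RegistryId={"RegistryName": "example-registry"},  # placeholder
    SchemaName="example-schema",                      # placeholder
    DataFormat="AVRO",
    Compatibility="BACKWARD",  # new versions must stay readable alongside the previous one
    SchemaDefinition=json.dumps(avro_schema),
)
```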
\nThe type of predefined worker that is allocated to use for the session. Accepts a value of Standard, G.1X, G.2X, or G.025X.
\nFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
The type of predefined worker that is allocated to use for the session. Accepts a value of Standard, G.1X, G.2X, or G.025X.
\n\tFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
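For reference, a hedged sketch of requesting one of the worker types above when creating an interactive session, assuming the AWS SDK for Kotlin surface generated from this model (the session id and role ARN are placeholders, and WorkerType.fromValue accepting the raw model value is an assumption).

```kotlin
import aws.sdk.kotlin.services.glue.GlueClient
import aws.sdk.kotlin.services.glue.model.WorkerType

suspend fun createEtlSession() {
    GlueClient { region = "us-east-1" }.use { glue ->
        glue.createSession {
            id = "example-session"                                   // placeholder id
            role = "arn:aws:iam::123456789012:role/GlueSessionRole"  // placeholder role
            command { name = "glueetl"; pythonVersion = "3" }
            workerType = WorkerType.fromValue("G.1X")  // 1 DPU, 1 executor per worker
            numberOfWorkers = 2
        }
    }
}
```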
A list of context words. If none of these context words are found within the vicinity of the regular expression the data will not be detected as sensitive data.
\nIf no context words are passed only a regular expression is checked.
" + "smithy.api#documentation": "A list of context words. If none of these context words are found within the vicinity of the regular expression the data will not be detected as sensitive data.
\n\t\n\tIf no context words are passed only a regular expression is checked.
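A short, hedged illustration of how context words narrow a custom sensitive-data pattern, assuming this is the ContextWords member of CreateCustomEntityType in the AWS SDK for Kotlin (the detector name and regular expression are made up).

```kotlin
import aws.sdk.kotlin.services.glue.GlueClient

suspend fun createBadgeIdDetector() {
    GlueClient { region = "us-east-1" }.use { glue ->
        glue.createCustomEntityType {
            name = "EmployeeBadgeId"                  // hypothetical detector name
            regexString = "EMP-[0-9]{6}"              // hypothetical pattern
            // Matches count as sensitive only when one of these words appears nearby.
            contextWords = listOf("employee", "badge", "staff")
        }
    }
}
```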
" } } }, @@ -10985,7 +10994,7 @@ } ], "traits": { - "smithy.api#documentation": "Delete the partition column statistics of a column.
\nThe Identity and Access Management (IAM) permission required for this operation is DeletePartition
.
Delete the partition column statistics of a column.
\n\t\n\tThe Identity and Access Management (IAM) permission required for this operation is DeletePartition
.
Retrieves table statistics of columns.
\nThe Identity and Access Management (IAM) permission required for this operation is DeleteTable
.
Deletes table statistics of columns.
\n\t\n\tThe Identity and Access Management (IAM) permission required for this operation is DeleteTable
.
A version range may be supplied which may be of the format:
\na single version number, 5
\na range, 5-8 : deletes versions 5, 6, 7, 8
\nA version range may be supplied which may be of the format:
\n\ta single version number, 5
\na range, 5-8 : deletes versions 5, 6, 7, 8
\nSpecifies whether to write the manifest files to the Delta table path.
" } - }, - "CreateNativeDeltaTable": { - "target": "com.amazonaws.glue#NullableBoolean", - "traits": { - "smithy.api#documentation": "Specifies whether the crawler will create native tables, to allow integration with query engines that support querying of the Delta transaction log directly.
" - } } }, "traits": { @@ -12336,19 +12339,19 @@ "WorkerType": { "target": "com.amazonaws.glue#WorkerType", "traits": { - "smithy.api#documentation": "The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.
\nFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
Known issue: when a development endpoint is created with the G.2X
\n WorkerType
configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.
The type of predefined worker that is allocated to the development endpoint. Accepts a value of Standard, G.1X, or G.2X.
\n\tFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
Known issue: when a development endpoint is created with the G.2X
\n WorkerType
configuration, the Spark drivers for the development endpoint will run on 4 vCPU, 16 GB of memory, and a 64 GB disk.
Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.
\nFor more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
\nDevelopment endpoints that are created without specifying a Glue version default to Glue 0.9.
\nYou can specify a version of Python support for development endpoints by using the Arguments
parameter in the CreateDevEndpoint
or UpdateDevEndpoint
APIs. If no arguments are provided, the version defaults to Python 2.
Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for running your ETL scripts on development endpoints.
\n \nFor more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
\n\t \n\tDevelopment endpoints that are created without specifying a Glue version default to Glue 0.9.
\n\t\n\tYou can specify a version of Python support for development endpoints by using the Arguments
parameter in the CreateDevEndpoint
or UpdateDevEndpoint
APIs. If no arguments are provided, the version defaults to Python 2.
The number of workers of a defined workerType
that are allocated to the development endpoint.
The maximum number of workers you can define is 299 for G.1X
, and 149 for G.2X
.
The number of workers of a defined workerType
that are allocated to the development endpoint.
The maximum number of workers you can define is 299 for G.1X
, and 149 for G.2X
.
The paths to one or more Python libraries in an Amazon S3 bucket that should be loaded in\n your DevEndpoint
. Multiple values must be complete paths separated by a\n comma.
You can only use pure Python libraries with a DevEndpoint
. Libraries that rely on\n C extensions, such as the pandas Python data\n analysis library, are not currently supported.
The paths to one or more Python libraries in an Amazon S3 bucket that should be loaded in\n your DevEndpoint
. Multiple values must be complete paths separated by a\n comma.
You can only use pure Python libraries with a DevEndpoint
. Libraries that rely on\n C extensions, such as the pandas Python data\n analysis library, are not currently supported.
A map of arguments used to configure the DevEndpoint
.
Valid arguments are:
\n\n \"--enable-glue-datacatalog\": \"\"
\n
You can specify a version of Python support for development endpoints by using the Arguments
parameter in the CreateDevEndpoint
or UpdateDevEndpoint
APIs. If no arguments are provided, the version defaults to Python 2.
A map of arguments used to configure the DevEndpoint
.
Valid arguments are:
\n\t\n \"--enable-glue-datacatalog\": \"\"
\n
You can specify a version of Python support for development endpoints by using the Arguments
parameter in the CreateDevEndpoint
or UpdateDevEndpoint
APIs. If no arguments are provided, the version defaults to Python 2.
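A hedged sketch of supplying that argument when creating a development endpoint with the AWS SDK for Kotlin (the endpoint name and role ARN are placeholders).

```kotlin
import aws.sdk.kotlin.services.glue.GlueClient

suspend fun createDevEndpointWithCatalogEnabled() {
    GlueClient { region = "us-east-1" }.use { glue ->
        glue.createDevEndpoint {
            endpointName = "example-dev-endpoint"                       // placeholder
            roleArn = "arn:aws:iam::123456789012:role/GlueDevEndpoint"  // placeholder
            glueVersion = "1.0"
            // The only documented key here; an empty value enables the Data Catalog
            // as the Spark SQL metastore for the endpoint.
            arguments = mapOf("--enable-glue-datacatalog" to "")
        }
    }
}
```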
A structure that specifies a list of NullValueField structures that represent a custom null value such as zero or other value being used as a null placeholder unique to the dataset.
\nThe DropNullFields
transform removes custom null values only if both the value of the null placeholder and the datatype match the data.
A structure that specifies a list of NullValueField structures that represent a custom null value such as zero or other value being used as a null placeholder unique to the dataset.
\n\nThe DropNullFields
transform removes custom null values only if both the value of the null placeholder and the datatype match the data.
Indicates whether to scan all the records, or to sample rows from the table. Scanning all the records can take a long time when the table is not a high throughput table.
\nA value of true
means to scan all records, while a value of false
means to sample the records. If no value is specified, the value defaults to true
.
Indicates whether to scan all the records, or to sample rows from the table. Scanning all the records can take a long time when the table is not a high throughput table.
\n\t\n\tA value of true
means to scan all records, while a value of false
means to sample the records. If no value is specified, the value defaults to true
.
The percentage of the configured read capacity units to use by the Glue crawler. Read capacity units is a term defined by DynamoDB, and is a numeric value that acts as rate limiter for the number of reads that can be performed on that table per second.
\nThe valid values are null or a value between 0.1 to 1.5. A null value is used when user does not provide a value, and defaults to 0.5 of the configured Read Capacity Unit (for provisioned tables), or 0.25 of the max configured Read Capacity Unit (for tables using on-demand mode).
" + "smithy.api#documentation": "The percentage of the configured read capacity units to use by the Glue crawler. Read capacity units is a term defined by DynamoDB, and is a numeric value that acts as rate limiter for the number of reads that can be performed on that table per second.
\n\t\n\tThe valid values are null or a value between 0.1 to 1.5. A null value is used when user does not provide a value, and defaults to 0.5 of the configured Read Capacity Unit (for provisioned tables), or 0.25 of the max configured Read Capacity Unit (for tables using on-demand mode).
" } } }, @@ -13528,7 +13531,7 @@ "AreaUnderPRCurve": { "target": "com.amazonaws.glue#GenericBoundedDouble", "traits": { - "smithy.api#documentation": "The area under the precision/recall curve (AUPRC) is a single number measuring the overall\n quality of the transform, that is independent of the choice made for precision vs. recall.\n Higher values indicate that you have a more attractive precision vs. recall tradeoff.
\nFor more information, see Precision and recall in Wikipedia.
" + "smithy.api#documentation": "The area under the precision/recall curve (AUPRC) is a single number measuring the overall\n quality of the transform, that is independent of the choice made for precision vs. recall.\n Higher values indicate that you have a more attractive precision vs. recall tradeoff.
\n\tFor more information, see Precision and recall in Wikipedia.
" } }, "Precision": { @@ -13552,7 +13555,7 @@ "ConfusionMatrix": { "target": "com.amazonaws.glue#ConfusionMatrix", "traits": { - "smithy.api#documentation": "The confusion matrix shows you what your transform is predicting accurately and what types of errors it is making.
\nFor more information, see Confusion matrix in Wikipedia.
" + "smithy.api#documentation": "The confusion matrix shows you what your transform is predicting accurately and what types of errors it is making.
\n\tFor more information, see Confusion matrix in Wikipedia.
" } }, "ColumnImportances": { @@ -13578,13 +13581,13 @@ "PrecisionRecallTradeoff": { "target": "com.amazonaws.glue#GenericBoundedDouble", "traits": { - "smithy.api#documentation": "The value selected when tuning your transform for a balance between precision and recall.\n A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a\n value of 0.0 means a bias for recall. Because this is a tradeoff, choosing values close to 1.0\n means very low recall, and choosing values close to 0.0 results in very low precision.
\nThe precision metric indicates how often your model is correct when it predicts a match.
\nThe recall metric indicates that for an actual match, how often your model predicts the\n match.
" + "smithy.api#documentation": "The value selected when tuning your transform for a balance between precision and recall.\n A value of 0.5 means no preference; a value of 1.0 means a bias purely for precision, and a\n value of 0.0 means a bias for recall. Because this is a tradeoff, choosing values close to 1.0\n means very low recall, and choosing values close to 0.0 results in very low precision.
\n\t\n\tThe precision metric indicates how often your model is correct when it predicts a match.
\n\t\n\tThe recall metric indicates that for an actual match, how often your model predicts the\n match.
" } }, "AccuracyCostTradeoff": { "target": "com.amazonaws.glue#GenericBoundedDouble", "traits": { - "smithy.api#documentation": "The value that is selected when tuning your transform for a balance between accuracy and\n cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0\n means a bias purely for accuracy, which typically results in a higher cost, sometimes\n substantially higher. A value of 0.0 means a bias purely for cost, which results in a less\n accurate FindMatches
transform, sometimes with unacceptable accuracy.
Accuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.
\nCost measures how many compute resources, and thus money, are consumed to run the\n transform.
" + "smithy.api#documentation": "The value that is selected when tuning your transform for a balance between accuracy and\n cost. A value of 0.5 means that the system balances accuracy and cost concerns. A value of 1.0\n means a bias purely for accuracy, which typically results in a higher cost, sometimes\n substantially higher. A value of 0.0 means a bias purely for cost, which results in a less\n accurate FindMatches
transform, sometimes with unacceptable accuracy.
Accuracy measures how well the transform finds true positives and true negatives. Increasing accuracy requires more machine resources and cost. But it also results in increased recall.
\n\t\n\tCost measures how many compute resources, and thus money, are consumed to run the\n transform.
" } }, "EnforceProvidedLabels": { @@ -14035,7 +14038,7 @@ } ], "traits": { - "smithy.api#documentation": "Retrieves partition statistics of columns.
\nThe Identity and Access Management (IAM) permission required for this operation is GetPartition
.
Retrieves partition statistics of columns.
\n\t\n\tThe Identity and Access Management (IAM) permission required for this operation is GetPartition
.
Retrieves table statistics of columns.
\nThe Identity and Access Management (IAM) permission required for this operation is GetTable
.
Retrieves table statistics of columns.
\n\t\nThe Identity and Access Management (IAM) permission required for this operation is GetTable
.
Allows you to specify that you want to list the databases shared with your account. The allowable values are FOREIGN
or ALL
.
If set to FOREIGN
, will list the databases shared with your account.
If set to ALL
, will list the databases shared with your account, as well as the databases in yor local account.
Allows you to specify that you want to list the databases shared with your account. The allowable values are FOREIGN
or ALL
.
If set to FOREIGN
, will list the databases shared with your account.
If set to ALL
, will list the databases shared with your account, as well as the databases in yor local account.
Returns information on a job bookmark entry.
\nFor more information about enabling and using job bookmarks, see:
\n\n Job structure\n
\nReturns information on a job bookmark entry.
\n\t\n\tFor more information about enabling and using job bookmarks, see:
\n\t\n\t\n Job structure\n
\nGets a list of runs for a machine learning transform. Machine learning task runs are\n asynchronous tasks that Glue runs on your behalf as part of various machine learning\n workflows. You can get a sortable, filterable list of machine learning task runs by calling\n GetMLTaskRuns
with their parent transform's TransformID
and other\n optional parameters as documented in this section.
This operation returns a list of historic runs and must be paginated.
", + "smithy.api#documentation": "Gets a list of runs for a machine learning transform. Machine learning task runs are\n asynchronous tasks that Glue runs on your behalf as part of various machine learning\n workflows. You can get a sortable, filterable list of machine learning task runs by calling\n GetMLTaskRuns
with their parent transform's TransformID
and other\n optional parameters as documented in this section.
This operation returns a list of historic runs and must be paginated.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -15978,13 +15981,13 @@ "MaxCapacity": { "target": "com.amazonaws.glue#NullableDouble", "traits": { - "smithy.api#documentation": "The number of Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of\n processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more\n information, see the Glue pricing\n page.
\nWhen the WorkerType
field is set to a value other than Standard
, the MaxCapacity
field is set automatically and becomes read-only.
The number of Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of\n processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more\n information, see the Glue pricing\n page.
\n\t\t\nWhen the WorkerType
field is set to a value other than Standard
, the MaxCapacity
field is set automatically and becomes read-only.
The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.
\nFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X
worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.
\n\tFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X
worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
An expression that filters the partitions to be returned.
\nThe expression uses SQL syntax similar to the SQL WHERE
filter clause. The\n SQL statement parser JSQLParser parses the expression.
\n Operators: The following are the operators that you can use in the\n Expression
API call:
Checks whether the values of the two operands are equal; if yes, then the condition becomes\n true.
\nExample: Assume 'variable a' holds 10 and 'variable b' holds 20.
\n(a = b) is not true.
\nChecks whether the values of two operands are equal; if the values are not equal,\n then the condition becomes true.
\nExample: (a < > b) is true.
\nChecks whether the value of the left operand is greater than the value of the right\n operand; if yes, then the condition becomes true.
\nExample: (a > b) is not true.
\nChecks whether the value of the left operand is less than the value of the right\n operand; if yes, then the condition becomes true.
\nExample: (a < b) is true.
\nChecks whether the value of the left operand is greater than or equal to the value\n of the right operand; if yes, then the condition becomes true.
\nExample: (a >= b) is not true.
\nChecks whether the value of the left operand is less than or equal to the value of\n the right operand; if yes, then the condition becomes true.
\nExample: (a <= b) is true.
\nLogical operators.
\n\n Supported Partition Key Types: The following are the supported\n partition keys.
\n\n string
\n
\n date
\n
\n timestamp
\n
\n int
\n
\n bigint
\n
\n long
\n
\n tinyint
\n
\n smallint
\n
\n decimal
\n
If an type is encountered that is not valid, an exception is thrown.
\nThe following list shows the valid operators on each type. When you define a crawler, the\n partitionKey
type is created as a STRING
, to be compatible with the catalog\n partitions.
\n Sample API Call:
" + "smithy.api#documentation": "An expression that filters the partitions to be returned.
\nThe expression uses SQL syntax similar to the SQL WHERE
filter clause. The\n SQL statement parser JSQLParser parses the expression.
\n Operators: The following are the operators that you can use in the\n Expression
API call:
Checks whether the values of the two operands are equal; if yes, then the condition becomes\n true.
\nExample: Assume 'variable a' holds 10 and 'variable b' holds 20.
\n(a = b) is not true.
\nChecks whether the values of two operands are equal; if the values are not equal,\n then the condition becomes true.
\nExample: (a < > b) is true.
\nChecks whether the value of the left operand is greater than the value of the right\n operand; if yes, then the condition becomes true.
\nExample: (a > b) is not true.
\nChecks whether the value of the left operand is less than the value of the right\n operand; if yes, then the condition becomes true.
\nExample: (a < b) is true.
\nChecks whether the value of the left operand is greater than or equal to the value\n of the right operand; if yes, then the condition becomes true.
\nExample: (a >= b) is not true.
\nChecks whether the value of the left operand is less than or equal to the value of\n the right operand; if yes, then the condition becomes true.
\nExample: (a <= b) is true.
\nLogical operators.
\n\n Supported Partition Key Types: The following are the supported\n partition keys.
\n\n string
\n
\n date
\n
\n timestamp
\n
\n int
\n
\n bigint
\n
\n long
\n
\n tinyint
\n
\n smallint
\n
\n decimal
\n
If an type is encountered that is not valid, an exception is thrown.
\nThe following list shows the valid operators on each type. When you define a crawler, the\n partitionKey
type is created as a STRING
, to be compatible with the catalog\n partitions.
\n Sample API Call:
" } }, "NextToken": { @@ -16492,7 +16495,7 @@ "AdditionalPlanOptionsMap": { "target": "com.amazonaws.glue#AdditionalPlanOptionsMap", "traits": { - "smithy.api#documentation": "A map to hold additional optional key-value parameters.
\nCurrently, these key-value pairs are supported:
\n\n inferSchema
— Specifies whether to set inferSchema
to true or false for the default script generated by an Glue job. For example, to set inferSchema
to true, pass the following key value pair:
\n --additional-plan-options-map '{\"inferSchema\":\"true\"}'
\n
A map to hold additional optional key-value parameters.
\n\t\n\tCurrently, these key-value pairs are supported:
\n\t\n\t\n inferSchema
— Specifies whether to set inferSchema
to true or false for the default script generated by an Glue job. For example, to set inferSchema
to true, pass the following key value pair:
\n --additional-plan-options-map '{\"inferSchema\":\"true\"}'
\n
This is a wrapper structure to contain schema identity fields. The structure contains:
\nSchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. One of SchemaArn
or SchemaName
has to be provided.
SchemaId$SchemaName: The name of the schema. One of SchemaArn
or SchemaName
has to be provided.
This is a wrapper structure to contain schema identity fields. The structure contains:
\n\tSchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. One of SchemaArn
or SchemaName
has to be provided.
SchemaId$SchemaName: The name of the schema. One of SchemaArn
or SchemaName
has to be provided.
This is a wrapper structure to contain schema identity fields. The structure contains:
\nSchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. Either SchemaArn
or SchemaName
and RegistryName
has to be provided.
SchemaId$SchemaName: The name of the schema. Either SchemaArn
or SchemaName
and RegistryName
has to be provided.
This is a wrapper structure to contain schema identity fields. The structure contains:
\n\tSchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. Either SchemaArn
or SchemaName
and RegistryName
has to be provided.
SchemaId$SchemaName: The name of the schema. Either SchemaArn
or SchemaName
and RegistryName
has to be provided.
This is a wrapper structure to contain schema identity fields. The structure contains:
\nSchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. Either SchemaArn
or SchemaName
and RegistryName
has to be provided.
SchemaId$SchemaName: The name of the schema. Either SchemaArn
or SchemaName
and RegistryName
has to be provided.
This is a wrapper structure to contain schema identity fields. The structure contains:
\n\tSchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. Either SchemaArn
or SchemaName
and RegistryName
has to be provided.
SchemaId$SchemaName: The name of the schema. Either SchemaArn
or SchemaName
and RegistryName
has to be provided.
This is a wrapper structure to contain schema identity fields. The structure contains:
\nSchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. One of SchemaArn
or SchemaName
has to be provided.
SchemaId$SchemaName: The name of the schema. One of SchemaArn
or SchemaName
has to be provided.
This is a wrapper structure to contain schema identity fields. The structure contains:
\n\tSchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. One of SchemaArn
or SchemaName
has to be provided.
SchemaId$SchemaName: The name of the schema. One of SchemaArn
or SchemaName
has to be provided.
Additional options for the table. Currently there are two keys supported:
\n\n pushDownPredicate
: to filter on partitions without having to list and read all the files in your dataset.
\n catalogPartitionPredicate
: to use server-side partition pruning using partition indexes in the Glue Data Catalog.
Additional options for the table. Currently there are two keys supported:
\n\t\n\t\n pushDownPredicate
: to filter on partitions without having to list and read all the files in your dataset.
\n catalogPartitionPredicate
: to use server-side partition pruning using partition indexes in the Glue Data Catalog.
Extra condition clause to filter data from source. For example:
\n\n BillingCity='Mountain View'
\n
When using a query instead of a table name, you should validate that the query works with the specified filterPredicate
.
Extra condition clause to filter data from source. For example:
\n\t\n\t\n BillingCity='Mountain View'
\n
When using a query instead of a table name, you should validate that the query works with the specified filterPredicate
.
Specify a value of RAWTYPES
or COMMENTS
to enable additional metadata in table responses. RAWTYPES
provides the native-level datatype. COMMENTS
provides comments associated with a column or table in the database.
If you do not need additional metadata, keep the field empty.
" + "smithy.api#documentation": "Specify a value of RAWTYPES
or COMMENTS
to enable additional metadata in table responses. RAWTYPES
provides the native-level datatype. COMMENTS
provides comments associated with a column or table in the database.
If you do not need additional metadata, keep the field empty.
" } } }, @@ -19704,7 +19707,7 @@ "smithy.api#deprecated": { "message": "This property is deprecated, use MaxCapacity instead." }, - "smithy.api#documentation": "This field is deprecated. Use MaxCapacity
instead.
The number of Glue data processing units (DPUs) allocated to runs of this job. You can\n allocate a minimum of 2 DPUs; the default is 10. A DPU is a relative measure of processing\n power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information,\n see the Glue pricing\n page.
\n " + "smithy.api#documentation": "This field is deprecated. Use MaxCapacity
instead.
The number of Glue data processing units (DPUs) allocated to runs of this job. You can\n allocate a minimum of 2 DPUs; the default is 10. A DPU is a relative measure of processing\n power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information,\n see the Glue pricing\n page.
\n\t " } }, "Timeout": { @@ -19716,13 +19719,13 @@ "MaxCapacity": { "target": "com.amazonaws.glue#NullableDouble", "traits": { - "smithy.api#documentation": "For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure\n of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory.\n For more information, see the Glue\n pricing page.
\nDo not set Max Capacity
if using WorkerType
and NumberOfWorkers
.
The value that can be allocated for MaxCapacity
depends on whether you are\n running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL\n job:
When you specify a Python shell job (JobCommand.Name
=\"pythonshell\"), you can\n allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name
=\"glueetl\") or Apache \n Spark streaming ETL job (JobCommand.Name
=\"gluestreaming\"), you can allocate a minimum of 2 DPUs. \n The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
For Glue version 2.0 jobs, you cannot instead specify a Maximum capacity
. Instead, you should specify a Worker type
and the Number of workers
.
For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure\n of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory.\n For more information, see the Glue\n pricing page.
\n\t \n\tDo not set Max Capacity
if using WorkerType
and NumberOfWorkers
.
The value that can be allocated for MaxCapacity
depends on whether you are\n running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL\n job:
When you specify a Python shell job (JobCommand.Name
=\"pythonshell\"), you can\n allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name
=\"glueetl\") or Apache \n Spark streaming ETL job (JobCommand.Name
=\"gluestreaming\"), you can allocate a minimum of 2 DPUs. \n The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
For Glue version 2.0 jobs, you cannot instead specify a Maximum capacity
. Instead, you should specify a Worker type
and the Number of workers
.
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X.
\nFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X.
\n\tFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark.
\nFor more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
\nJobs that are created without specifying a Glue version default to Glue 0.9.
" + "smithy.api#documentation": "Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark.
\n \nFor more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
\n\t \n\tJobs that are created without specifying a Glue version default to Glue 0.9.
" } }, "CodeGenConfigurationNodes": { @@ -19758,7 +19761,7 @@ "ExecutionClass": { "target": "com.amazonaws.glue#ExecutionClass", "traits": { - "smithy.api#documentation": "Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal for time-sensitive workloads that require fast job startup and dedicated resources.
\nThe flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary.
\nOnly jobs with Glue version 3.0 and above and command type glueetl
will be allowed to set ExecutionClass
to FLEX
. The flexible execution class is available for Spark jobs.
Indicates whether the job is run with a standard or flexible execution class. The standard execution class is ideal for time-sensitive workloads that require fast job startup and dedicated resources.
\n\nThe flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary.
\n\t\n\tOnly jobs with Glue version 3.0 and above and command type glueetl
will be allowed to set ExecutionClass
to FLEX
. The flexible execution class is available for Spark jobs.
This field is deprecated. Use MaxCapacity
instead.
The number of Glue data processing units (DPUs) allocated to this JobRun.\n From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure\n of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory.\n For more information, see the Glue\n pricing page.
" + "smithy.api#documentation": "This field is deprecated. Use MaxCapacity
instead.
The number of Glue data processing units (DPUs) allocated to this JobRun.\n From 2 to 100 DPUs can be allocated; the default is 10. A DPU is a relative measure\n of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory.\n For more information, see the Glue\n pricing page.
" } }, "ExecutionTime": { @@ -20013,13 +20016,13 @@ "Timeout": { "target": "com.amazonaws.glue#Timeout", "traits": { - "smithy.api#documentation": "The JobRun
timeout in minutes. This is the maximum time that a job run can\n consume resources before it is terminated and enters TIMEOUT
status. This value overrides the timeout value set in the parent job.
Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
" + "smithy.api#documentation": "The JobRun
timeout in minutes. This is the maximum time that a job run can\n consume resources before it is terminated and enters TIMEOUT
status. This value overrides the timeout value set in the parent job.
Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
" } }, "MaxCapacity": { "target": "com.amazonaws.glue#NullableDouble", "traits": { - "smithy.api#documentation": "The number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure\n of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory.\n For more information, see the Glue\n pricing page.
\nDo not set Max Capacity
if using WorkerType
and NumberOfWorkers
.
The value that can be allocated for MaxCapacity
depends on whether you are\n running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name
=\"pythonshell\"), you can\n allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name
=\"glueetl\"), you can allocate a minimum of 2 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
The number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure\n of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory.\n For more information, see the Glue\n pricing page.
\n\nDo not set Max Capacity
if using WorkerType
and NumberOfWorkers
.
The value that can be allocated for MaxCapacity
depends on whether you are\n running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name
=\"pythonshell\"), you can\n allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name
=\"glueetl\"), you can allocate a minimum of 2 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark.
\nFor more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
\nJobs that are created without specifying a Glue version default to Glue 0.9.
" + "smithy.api#documentation": "Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark.
\n \nFor more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
\n\t \n\tJobs that are created without specifying a Glue version default to Glue 0.9.
" } }, "DPUSeconds": { @@ -20067,7 +20070,7 @@ "ExecutionClass": { "target": "com.amazonaws.glue#ExecutionClass", "traits": { - "smithy.api#documentation": "Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is ideal for time-sensitive workloads that require fast job startup and dedicated resources.
\nThe flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary.
\nOnly jobs with Glue version 3.0 and above and command type glueetl
will be allowed to set ExecutionClass
to FLEX
. The flexible execution class is available for Spark jobs.
Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is ideal for time-sensitive workloads that require fast job startup and dedicated resources.
\n\nThe flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary.
\n\t\n\tOnly jobs with Glue version 3.0 and above and command type glueetl
will be allowed to set ExecutionClass
to FLEX
. The flexible execution class is available for Spark jobs.
This field is deprecated. Use MaxCapacity
instead.
The number of Glue data processing units (DPUs) to allocate to this job. You can\n allocate a minimum of 2 DPUs; the default is 10. A DPU is a relative measure of processing\n power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information,\n see the Glue pricing\n page.
" + "smithy.api#documentation": "This field is deprecated. Use MaxCapacity
instead.
The number of Glue data processing units (DPUs) to allocate to this job. You can\n allocate a minimum of 2 DPUs; the default is 10. A DPU is a relative measure of processing\n power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information,\n see the Glue pricing\n page.
" } }, "Timeout": { @@ -20217,13 +20220,13 @@ "MaxCapacity": { "target": "com.amazonaws.glue#NullableDouble", "traits": { - "smithy.api#documentation": "For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure\n of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
\nDo not set Max Capacity
if using WorkerType
and NumberOfWorkers
.
The value that can be allocated for MaxCapacity
depends on whether you are\n running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name
=\"pythonshell\"), you can\n allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name
=\"glueetl\") or Apache \n Spark streaming ETL job (JobCommand.Name
=\"gluestreaming\"), you can allocate a minimum of 2 DPUs. \n The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
For Glue version 2.0 jobs, you cannot instead specify a Maximum capacity
. Instead, you should specify a Worker type
and the Number of workers
.
For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure\n of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the Glue pricing page.
\n \n\tDo not set Max Capacity
if using WorkerType
and NumberOfWorkers
.
The value that can be allocated for MaxCapacity
depends on whether you are\n running a Python shell job or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name
=\"pythonshell\"), you can\n allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name
=\"glueetl\") or Apache \n Spark streaming ETL job (JobCommand.Name
=\"gluestreaming\"), you can allocate a minimum of 2 DPUs. \n The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
For Glue version 2.0 jobs, you cannot instead specify a Maximum capacity
. Instead, you should specify a Worker type
and the Number of workers
.
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X.
\nFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
The type of predefined worker that is allocated when a job runs. Accepts a value of Standard, G.1X, G.2X, or G.025X.
\n\tFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker maps to 1 DPU (4 vCPU, 16 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.2X
worker type, each worker maps to 2 DPU (8 vCPU, 32 GB of memory, 128 GB disk), and provides 1 executor per worker. We recommend this worker type for memory-intensive jobs.
For the G.025X
worker type, each worker maps to 0.25 DPU (2 vCPU, 4 GB of memory, 64 GB disk), and provides 1 executor per worker. We recommend this worker type for low volume streaming jobs. This worker type is only available for Glue version 3.0 streaming jobs.
Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark.
\nFor more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
" + "smithy.api#documentation": "Glue version determines the versions of Apache Spark and Python that Glue supports. The Python version indicates the version supported for jobs of type Spark.
\n \nFor more information about the available Glue versions and corresponding Spark and Python versions, see Glue version in the developer guide.
" } }, "CodeGenConfigurationNodes": { @@ -20259,7 +20262,7 @@ "ExecutionClass": { "target": "com.amazonaws.glue#ExecutionClass", "traits": { - "smithy.api#documentation": "Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is ideal for time-sensitive workloads that require fast job startup and dedicated resources.
\nThe flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary.
\nOnly jobs with Glue version 3.0 and above and command type glueetl
will be allowed to set ExecutionClass
to FLEX
. The flexible execution class is available for Spark jobs.
Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is ideal for time-sensitive workloads that require fast job startup and dedicated resources.
\n\nThe flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary.
\n\t\n\tOnly jobs with Glue version 3.0 and above and command type glueetl
will be allowed to set ExecutionClass
to FLEX
. The flexible execution class is available for Spark jobs.
Specifies whether data lineage is enabled for the crawler. Valid values are:
\nENABLE: enables data lineage for the crawler
\nDISABLE: disables data lineage for the crawler
\nSpecifies whether data lineage is enabled for the crawler. Valid values are:
\n\t\n\tENABLE: enables data lineage for the crawler
\nDISABLE: disables data lineage for the crawler
\nRetrieves the names of all crawler resources in this Amazon Web Services account, or the\n resources with the specified tag. This operation allows you to see which\n resources are available in your account, and their names.
\nThis operation takes the optional Tags
field, which you can use as a filter on\n the response so that tagged resources can be retrieved as a group. If you choose to use tags\n filtering, only resources with the tag are retrieved.
Retrieves the names of all crawler resources in this Amazon Web Services account, or the\n resources with the specified tag. This operation allows you to see which\n resources are available in your account, and their names.
\n\nThis operation takes the optional Tags
field, which you can use as a filter on\n the response so that tagged resources can be retrieved as a group. If you choose to use tags\n filtering, only resources with the tag are retrieved.
Returns all the crawls of a specified crawler. Returns only the crawls that have occurred since the launch date of the crawler history feature, and only retains up to 12 months of crawls. Older crawls will not be returned.
\nYou may use this API to:
\nRetrive all the crawls of a specified crawler.
\nRetrieve all the crawls of a specified crawler within a limited count.
\nRetrieve all the crawls of a specified crawler in a specific time range.
\nRetrieve all the crawls of a specified crawler with a particular state, crawl ID, or DPU hour value.
\nReturns all the crawls of a specified crawler. Returns only the crawls that have occurred since the launch date of the crawler history feature, and only retains up to 12 months of crawls. Older crawls will not be returned.
\n\t\n\tYou may use this API to:
\n\tRetrive all the crawls of a specified crawler.
\nRetrieve all the crawls of a specified crawler within a limited count.
\nRetrieve all the crawls of a specified crawler in a specific time range.
\nRetrieve all the crawls of a specified crawler with a particular state, crawl ID, or DPU hour value.
\nRetrieves the names of all DevEndpoint
resources in this Amazon Web Services account, or the\n resources with the specified tag. This operation allows you to see which resources are\n available in your account, and their names.
This operation takes the optional Tags
field, which you can use as a filter on\n the response so that tagged resources can be retrieved as a group. If you choose to use tags\n filtering, only resources with the tag are retrieved.
Retrieves the names of all DevEndpoint
resources in this Amazon Web Services account, or the\n resources with the specified tag. This operation allows you to see which resources are\n available in your account, and their names.
This operation takes the optional Tags
field, which you can use as a filter on\n the response so that tagged resources can be retrieved as a group. If you choose to use tags\n filtering, only resources with the tag are retrieved.
Retrieves the names of all job resources in this Amazon Web Services account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.
\nThis operation takes the optional Tags
field, which you can use as a filter on\n the response so that tagged resources can be retrieved as a group. If you choose to use tags\n filtering, only resources with the tag are retrieved.
Retrieves the names of all job resources in this Amazon Web Services account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.
\n\t\n\tThis operation takes the optional Tags
field, which you can use as a filter on\n the response so that tagged resources can be retrieved as a group. If you choose to use tags\n filtering, only resources with the tag are retrieved.
This is a wrapper structure to contain schema identity fields. The structure contains:
\nSchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. Either SchemaArn
or SchemaName
and RegistryName
has to be provided.
SchemaId$SchemaName: The name of the schema. Either SchemaArn
or SchemaName
and RegistryName
has to be provided.
This is a wrapper structure to contain schema identity fields. The structure contains:
\n\tSchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. Either SchemaArn
or SchemaName
and RegistryName
has to be provided.
SchemaId$SchemaName: The name of the schema. Either SchemaArn
or SchemaName
and RegistryName
has to be provided.
Retrieves the names of all trigger resources in this Amazon Web Services account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.
\nThis operation takes the optional Tags
field, which you can use as a filter on\n the response so that tagged resources can be retrieved as a group. If you choose to use tags\n filtering, only resources with the tag are retrieved.
Retrieves the names of all trigger resources in this Amazon Web Services account, or the resources with the specified tag. This operation allows you to see which resources are available in your account, and their names.
\n\nThis operation takes the optional Tags
field, which you can use as a filter on\n the response so that tagged resources can be retrieved as a group. If you choose to use tags\n filtering, only resources with the tag are retrieved.
The name or Amazon Resource Name (ARN) of the IAM role with the required permissions. The required permissions include both Glue service role permissions to Glue resources, and Amazon S3 permissions required by the transform.
\nThis role needs Glue service role permissions to allow access to resources in Glue. See Attach a Policy to IAM Users That Access Glue.
\nThis role needs permission to your Amazon Simple Storage Service (Amazon S3) sources, targets, temporary directory, scripts, and any libraries used by the task run for this transform.
\nThe name or Amazon Resource Name (ARN) of the IAM role with the required permissions. The required permissions include both Glue service role permissions to Glue resources, and Amazon S3 permissions required by the transform.
\n\n\t\tThis role needs Glue service role permissions to allow access to resources in Glue. See Attach a Policy to IAM Users That Access Glue.
\nThis role needs permission to your Amazon Simple Storage Service (Amazon S3) sources, targets, temporary directory, scripts, and any libraries used by the task run for this transform.
\nThe number of Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of\n processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more\n information, see the Glue pricing\n page.
\n\n MaxCapacity
is a mutually exclusive option with NumberOfWorkers
and WorkerType
.
If either NumberOfWorkers
or WorkerType
is set, then MaxCapacity
cannot be set.
If MaxCapacity
is set then neither NumberOfWorkers
or WorkerType
can be set.
If WorkerType
is set, then NumberOfWorkers
is required (and vice versa).
\n MaxCapacity
and NumberOfWorkers
must both be at least 1.
When the WorkerType
field is set to a value other than Standard
, the MaxCapacity
field is set automatically and becomes read-only.
The number of Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of\n processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more\n information, see the Glue pricing\n page.
\n\t\t\n\t\t\n MaxCapacity
is a mutually exclusive option with NumberOfWorkers
and WorkerType
.
If either NumberOfWorkers
or WorkerType
is set, then MaxCapacity
cannot be set.
If MaxCapacity
is set then neither NumberOfWorkers
or WorkerType
can be set.
If WorkerType
is set, then NumberOfWorkers
is required (and vice versa).
\n MaxCapacity
and NumberOfWorkers
must both be at least 1.
When the WorkerType
field is set to a value other than Standard
, the MaxCapacity
field is set automatically and becomes read-only.
The type of predefined worker that is allocated when a task of this transform runs. Accepts a value of Standard, G.1X, or G.2X.
\nFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X
worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
\n MaxCapacity
is a mutually exclusive option with NumberOfWorkers
and WorkerType
.
If either NumberOfWorkers
or WorkerType
is set, then MaxCapacity
cannot be set.
If MaxCapacity
is set then neither NumberOfWorkers
or WorkerType
can be set.
If WorkerType
is set, then NumberOfWorkers
is required (and vice versa).
\n MaxCapacity
and NumberOfWorkers
must both be at least 1.
The type of predefined worker that is allocated when a task of this transform runs. Accepts a value of Standard, G.1X, or G.2X.
\n\tFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X
worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
\n MaxCapacity
is a mutually exclusive option with NumberOfWorkers
and WorkerType
.
If either NumberOfWorkers
or WorkerType
is set, then MaxCapacity
cannot be set.
If MaxCapacity
is set, then neither NumberOfWorkers
nor WorkerType
can be set.
If WorkerType
is set, then NumberOfWorkers
is required (and vice versa).
\n MaxCapacity
and NumberOfWorkers
must both be at least 1.
The number of workers of a defined workerType
that are allocated when a task of the transform runs.
If WorkerType
is set, then NumberOfWorkers
is required (and vice versa).
The number of workers of a defined workerType
that are allocated when a task of the transform runs.
If WorkerType
is set, then NumberOfWorkers
is required (and vice versa).
The encryption mode applied to user data. Valid values are:
\nDISABLED: encryption is disabled
\nSSEKMS: use of server-side encryption with Key Management Service (SSE-KMS) for user data stored in Amazon S3.
\nThe encryption mode applied to user data. Valid values are:
\n\t\n\tDISABLED: encryption is disabled
\nSSEKMS: use of server-side encryption with Key Management Service (SSE-KMS) for user data stored in Amazon S3.
\nOnly applicable to nested data structures. If you want to change the parent structure, but also one of its children, you can fill out this data strucutre. It is also Mapping
, but its FromPath
will be the parent's FromPath
plus the FromPath
from this structure.
For the children part, suppose you have the structure:
\n\n {\n \"FromPath\": \"OuterStructure\",\n \"ToKey\": \"OuterStructure\",\n \"ToType\": \"Struct\",\n \"Dropped\": false,\n \"Children\": [{\n \"FromPath\": \"inner\",\n \"ToKey\": \"inner\",\n \"ToType\": \"Double\",\n \"Dropped\": false\n }]\n}
\n
You can specify a Mapping
that looks like:
\n {\n \"FromPath\": \"OuterStructure\",\n \"ToKey\": \"OuterStructure\",\n \"ToType\": \"Struct\",\n \"Dropped\": false,\n \"Children\": [{\n \"FromPath\": \"inner\",\n \"ToKey\": \"inner\",\n \"ToType\": \"Double\",\n \"Dropped\": false\n }]\n}
\n
Only applicable to nested data structures. If you want to change the parent structure, but also one of its children, you can fill out this data structure. It is also a Mapping
, but its FromPath
will be the parent's FromPath
plus the FromPath
from this structure.
For the children part, suppose you have the structure:
\n\t\n\t\n {\n \"FromPath\": \"OuterStructure\",\n \"ToKey\": \"OuterStructure\",\n \"ToType\": \"Struct\",\n \"Dropped\": false,\n \"Children\": [{\n \"FromPath\": \"inner\",\n \"ToKey\": \"inner\",\n \"ToType\": \"Double\",\n \"Dropped\": false\n }]\n}
\n
You can specify a Mapping
that looks like:
\n {\n \"FromPath\": \"OuterStructure\",\n \"ToKey\": \"OuterStructure\",\n \"ToType\": \"Struct\",\n \"Dropped\": false,\n \"Children\": [{\n \"FromPath\": \"inner\",\n \"ToKey\": \"inner\",\n \"ToType\": \"Double\",\n \"Dropped\": false\n }]\n}
\n
Indicates whether to scan all the records, or to sample rows from the table. Scanning all the records can take a long time when the table is not a high throughput table.
\nA value of true
means to scan all records, while a value of false
means to sample the records. If no value is specified, the value defaults to true
.
Indicates whether to scan all the records, or to sample rows from the table. Scanning all the records can take a long time when the table is not a high throughput table.
\n\t\n\tA value of true
means to scan all records, while a value of false
means to sample the records. If no value is specified, the value defaults to true
.
The status of the partition index.
\nThe possible statuses are:
\nCREATING: The index is being created. When an index is in a CREATING state, the index or its table cannot be deleted.
\nACTIVE: The index creation succeeds.
\nFAILED: The index creation fails.
\nDELETING: The index is deleted from the list of indexes.
\nThe status of the partition index.
\n\t\n\tThe possible statuses are:
\n\tCREATING: The index is being created. When an index is in a CREATING state, the index or its table cannot be deleted.
\nACTIVE: The index creation succeeds.
\nFAILED: The index creation fails.
\nDELETING: The index is deleted from the list of indexes.
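An illustrative, assumption-heavy sketch of reading those index statuses with the AWS SDK for Kotlin; getPartitionIndexes and the member names below follow the SDK's usual camelCase mapping of these shapes and are not verified against the generated client.

import aws.sdk.kotlin.services.glue.GlueClient

// Sketch: print the status (CREATING, ACTIVE, FAILED, or DELETING) of each partition index.
suspend fun printPartitionIndexStatuses(db: String, table: String) {
    GlueClient.fromEnvironment { }.use { glue ->
        val resp = glue.getPartitionIndexes {
            databaseName = db
            tableName = table
        }
        resp.partitionIndexDescriptorList?.forEach { idx ->
            println("${idx.indexName}: ${idx.indexStatus}")
        }
    }
}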
\nThe values of the partition. Although this parameter is not required by the SDK, you must specify this parameter for a valid input.
\nThe values for the keys for the new partition must be passed as an array of String objects that must be ordered in the same order as the partition keys appearing in the Amazon S3 prefix. Otherwise Glue will add the values to the wrong keys.
" + "smithy.api#documentation": "The values of the partition. Although this parameter is not required by the SDK, you must specify this parameter for a valid input.
\n\t\n\tThe values for the keys for the new partition must be passed as an array of String objects that must be ordered in the same order as the partition keys appearing in the Amazon S3 prefix. Otherwise Glue will add the values to the wrong keys.
" } }, "LastAccessTime": { @@ -24762,7 +24765,7 @@ "RecrawlBehavior": { "target": "com.amazonaws.glue#RecrawlBehavior", "traits": { - "smithy.api#documentation": "Specifies whether to crawl the entire dataset again or to crawl only folders that were added since the last crawler run.
\nA value of CRAWL_EVERYTHING
specifies crawling the entire dataset again.
A value of CRAWL_NEW_FOLDERS_ONLY
specifies crawling only folders that were added since the last crawler run.
A value of CRAWL_EVENT_MODE
specifies crawling only the changes identified by Amazon S3 events.
Specifies whether to crawl the entire dataset again or to crawl only folders that were added since the last crawler run.
\n\t\n\tA value of CRAWL_EVERYTHING
specifies crawling the entire dataset again.
A value of CRAWL_NEW_FOLDERS_ONLY
specifies crawling only folders that were added since the last crawler run.
A value of CRAWL_EVENT_MODE
specifies crawling only the changes identified by Amazon S3 events.
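For illustration only, not part of the synced model: a hedged Kotlin sketch that applies CRAWL_NEW_FOLDERS_ONLY to an existing crawler. The RecrawlPolicy builder and the enum's fromValue helper are assumed from the SDK's codegen conventions.

import aws.sdk.kotlin.services.glue.GlueClient
import aws.sdk.kotlin.services.glue.model.RecrawlBehavior
import aws.sdk.kotlin.services.glue.model.RecrawlPolicy

// Sketch: only crawl folders added since the last crawler run.
suspend fun useIncrementalRecrawl(crawlerName: String) {
    GlueClient.fromEnvironment { }.use { glue ->
        glue.updateCrawler {
            name = crawlerName
            recrawlPolicy = RecrawlPolicy {
                recrawlBehavior = RecrawlBehavior.fromValue("CRAWL_NEW_FOLDERS_ONLY")
            }
        }
    }
}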
Adds a new version to the existing schema. Returns an error if new version of schema does not meet the compatibility requirements of the schema set. This API will not create a new schema set and will return a 404 error if the schema set is not already present in the Schema Registry.
\nIf this is the first schema definition to be registered in the Schema Registry, this API will store the schema version and return immediately. Otherwise, this call has the potential to run longer than other operations due to compatibility modes. You can call the GetSchemaVersion
API with the SchemaVersionId
to check compatibility modes.
If the same schema definition is already stored in Schema Registry as a version, the schema ID of the existing schema is returned to the caller.
" + "smithy.api#documentation": "Adds a new version to the existing schema. Returns an error if new version of schema does not meet the compatibility requirements of the schema set. This API will not create a new schema set and will return a 404 error if the schema set is not already present in the Schema Registry.
\nIf this is the first schema definition to be registered in the Schema Registry, this API will store the schema version and return immediately. Otherwise, this call has the potential to run longer than other operations due to compatibility modes. You can call the GetSchemaVersion
API with the SchemaVersionId
to check compatibility modes.
If the same schema definition is already stored in Schema Registry as a version, the schema ID of the existing schema is returned to the caller.
" } }, "com.amazonaws.glue#RegisterSchemaVersionInput": { @@ -24903,7 +24906,7 @@ "SchemaId": { "target": "com.amazonaws.glue#SchemaId", "traits": { - "smithy.api#documentation": "This is a wrapper structure to contain schema identity fields. The structure contains:
\nSchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. Either SchemaArn
or SchemaName
and RegistryName
has to be provided.
SchemaId$SchemaName: The name of the schema. Either SchemaArn
or SchemaName
and RegistryName
has to be provided.
This is a wrapper structure to contain schema identity fields. The structure contains:
\n\tSchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. Either SchemaArn
or SchemaName
and RegistryName
has to be provided.
SchemaId$SchemaName: The name of the schema. Either SchemaArn
or SchemaName
and RegistryName
has to be provided.
Resets a bookmark entry.
\nFor more information about enabling and using job bookmarks, see:
\n\n Job structure\n
\nResets a bookmark entry.
\n\t\n\tFor more information about enabling and using job bookmarks, see:
\n\t\n\t\n Job structure\n
\nA list of key-value pairs, and a comparator used to filter the search results. Returns all entities matching the predicate.
\nThe Comparator
member of the PropertyPredicate
struct is used only for time fields, and can be omitted for other field types. Also, when comparing string values, such as when Key=Name
, a fuzzy match algorithm is used. The Key
field (for example, the value of the Name
field) is split on certain punctuation characters, for example, -, :, #, etc. into tokens. Then each token is exact-match compared with the Value
member of PropertyPredicate
. For example, if Key=Name
and Value=link
, tables named customer-link
and xx-link-yy
are returned, but xxlinkyy
is not returned.
A list of key-value pairs, and a comparator used to filter the search results. Returns all entities matching the predicate.
\n\t\n\tThe Comparator
member of the PropertyPredicate
struct is used only for time fields, and can be omitted for other field types. Also, when comparing string values, such as when Key=Name
, a fuzzy match algorithm is used. The Key
field (for example, the value of the Name
field) is split on certain punctuation characters, for example, -, :, #, etc. into tokens. Then each token is exact-match compared with the Value
member of PropertyPredicate
. For example, if Key=Name
and Value=link
, tables named customer-link
and xx-link-yy
are returned, but xxlinkyy
is not returned.
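A hedged sketch of the fuzzy Name match described above, using the AWS SDK for Kotlin; searchTables, PropertyPredicate, and the response member names are assumed from the usual codegen mapping rather than taken from this diff.

import aws.sdk.kotlin.services.glue.GlueClient
import aws.sdk.kotlin.services.glue.model.PropertyPredicate

// Sketch: returns tables such as customer-link and xx-link-yy, but not xxlinkyy.
suspend fun findLinkTables(): List<String> =
    GlueClient.fromEnvironment { }.use { glue ->
        val resp = glue.searchTables {
            filters = listOf(
                PropertyPredicate {
                    key = "Name"
                    value = "link"
                    // comparator is only needed for time fields and is omitted here.
                }
            )
        }
        resp.tableList?.mapNotNull { it.name }.orEmpty()
    }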
A string used for a text search.
\nSpecifying a value in quotes filters based on an exact match to the value.
" + "smithy.api#documentation": "A string used for a text search.
\n\tSpecifying a value in quotes filters based on an exact match to the value.
" } }, "SortCriteria": { @@ -26722,7 +26725,7 @@ "ResourceShareType": { "target": "com.amazonaws.glue#ResourceShareType", "traits": { - "smithy.api#documentation": "Allows you to specify that you want to search the tables shared with your account. The allowable values are FOREIGN
or ALL
.
If set to FOREIGN
, will search the tables shared with your account.
If set to ALL
, will search the tables shared with your account, as well as the tables in yor local account.
Allows you to specify that you want to search the tables shared with your account. The allowable values are FOREIGN
or ALL
.
If set to FOREIGN
, will search the tables shared with your account.
If set to ALL
, will search the tables shared with your account, as well as the tables in yor local account.
A list of aliases. An alias allows you to specify what name to use in the SQL for a given input. For example, you have a datasource named \"MyDataSource\". If you specify From
as MyDataSource, and Alias
as SqlName, then in your SQL you can do:
\n select *\nfrom SqlName
\n
and that gets data from MyDataSource.
", + "smithy.api#documentation": "A list of aliases. An alias allows you to specify what name to use in the SQL for a given input. For example, you have a datasource named \"MyDataSource\". If you specify From
as MyDataSource, and Alias
as SqlName, then in your SQL you can do:
\n select *\nfrom SqlName
\n
and that gets data from MyDataSource.
", "smithy.api#required": {} } }, @@ -27929,7 +27932,7 @@ } ], "traits": { - "smithy.api#documentation": "Enables you to provide additional labels (examples of truth) to be used to teach the\n machine learning transform and improve its quality. This API operation is generally used as\n part of the active learning workflow that starts with the\n StartMLLabelingSetGenerationTaskRun
call and that ultimately results in\n improving the quality of your machine learning transform.
After the StartMLLabelingSetGenerationTaskRun
finishes, Glue machine learning\n will have generated a series of questions for humans to answer. (Answering these questions is\n often called 'labeling' in the machine learning workflows). In the case of the\n FindMatches
transform, these questions are of the form, “What is the correct\n way to group these rows together into groups composed entirely of matching records?” After the\n labeling process is finished, users upload their answers/labels with a call to\n StartImportLabelsTaskRun
. After StartImportLabelsTaskRun
finishes,\n all future runs of the machine learning transform use the new and improved labels and perform\n a higher-quality transformation.
By default, StartMLLabelingSetGenerationTaskRun
continually learns from and\n combines all labels that you upload unless you set Replace
to true. If you set\n Replace
to true, StartImportLabelsTaskRun
deletes and forgets all\n previously uploaded labels and learns only from the exact set that you upload. Replacing\n labels can be helpful if you realize that you previously uploaded incorrect labels, and you\n believe that they are having a negative effect on your transform quality.
You can check on the status of your task run by calling the GetMLTaskRun
\n operation.
Enables you to provide additional labels (examples of truth) to be used to teach the\n machine learning transform and improve its quality. This API operation is generally used as\n part of the active learning workflow that starts with the\n StartMLLabelingSetGenerationTaskRun
call and that ultimately results in\n improving the quality of your machine learning transform.
After the StartMLLabelingSetGenerationTaskRun
finishes, Glue machine learning\n will have generated a series of questions for humans to answer. (Answering these questions is\n often called 'labeling' in the machine learning workflows). In the case of the\n FindMatches
transform, these questions are of the form, “What is the correct\n way to group these rows together into groups composed entirely of matching records?” After the\n labeling process is finished, users upload their answers/labels with a call to\n StartImportLabelsTaskRun
. After StartImportLabelsTaskRun
finishes,\n all future runs of the machine learning transform use the new and improved labels and perform\n a higher-quality transformation.
By default, StartMLLabelingSetGenerationTaskRun
continually learns from and\n combines all labels that you upload unless you set Replace
to true. If you set\n Replace
to true, StartImportLabelsTaskRun
deletes and forgets all\n previously uploaded labels and learns only from the exact set that you upload. Replacing\n labels can be helpful if you realize that you previously uploaded incorrect labels, and you\n believe that they are having a negative effect on your transform quality.
You can check on the status of your task run by calling the GetMLTaskRun
\n operation.
This field is deprecated. Use MaxCapacity
instead.
The number of Glue data processing units (DPUs) to allocate to this JobRun.\n You can allocate a minimum of 2 DPUs; the default is 10. A DPU is a relative measure\n of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory.\n For more information, see the Glue\n pricing page.
" + "smithy.api#documentation": "This field is deprecated. Use MaxCapacity
instead.
The number of Glue data processing units (DPUs) to allocate to this JobRun.\n You can allocate a minimum of 2 DPUs; the default is 10. A DPU is a relative measure\n of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory.\n For more information, see the Glue\n pricing page.
" } }, "Timeout": { "target": "com.amazonaws.glue#Timeout", "traits": { - "smithy.api#documentation": "The JobRun
timeout in minutes. This is the maximum time that a job run can\n consume resources before it is terminated and enters TIMEOUT
status. This value overrides the timeout value set in the parent job.
Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
" + "smithy.api#documentation": "The JobRun
timeout in minutes. This is the maximum time that a job run can\n consume resources before it is terminated and enters TIMEOUT
status. This value overrides the timeout value set in the parent job.
Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).
" } }, "MaxCapacity": { "target": "com.amazonaws.glue#NullableDouble", "traits": { - "smithy.api#documentation": "The number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure\n of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory.\n For more information, see the Glue\n pricing page.
\nDo not set Max Capacity
if using WorkerType
and NumberOfWorkers
.
The value that can be allocated for MaxCapacity
depends on whether you are\n running a Python shell job, or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name
=\"pythonshell\"), you can\n allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name
=\"glueetl\"), you can allocate a minimum of 2 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
The number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure\n of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory.\n For more information, see the Glue\n pricing page.
\n\nDo not set Max Capacity
if using WorkerType
and NumberOfWorkers
.
The value that can be allocated for MaxCapacity
depends on whether you are\n running a Python shell job, or an Apache Spark ETL job:
When you specify a Python shell job (JobCommand.Name
=\"pythonshell\"), you can\n allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.
When you specify an Apache Spark ETL job (JobCommand.Name
=\"glueetl\"), you can allocate a minimum of 2 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.
Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is ideal for time-sensitive workloads that require fast job startup and dedicated resources.
\nThe flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary.
\nOnly jobs with Glue version 3.0 and above and command type glueetl
will be allowed to set ExecutionClass
to FLEX
. The flexible execution class is available for Spark jobs.
Indicates whether the job is run with a standard or flexible execution class. The standard execution-class is ideal for time-sensitive workloads that require fast job startup and dedicated resources.
\n\nThe flexible execution class is appropriate for time-insensitive jobs whose start and completion times may vary.
\n\t\n\tOnly jobs with Glue version 3.0 and above and command type glueetl
will be allowed to set ExecutionClass
to FLEX
. The flexible execution class is available for Spark jobs.
Starts a task to estimate the quality of the transform.
\nWhen you provide label sets as examples of truth, Glue machine learning uses some of\n those examples to learn from them. The rest of the labels are used as a test to estimate\n quality.
\nReturns a unique identifier for the run. You can call GetMLTaskRun
to get more\n information about the stats of the EvaluationTaskRun
.
Starts a task to estimate the quality of the transform.
\n\t\n\tWhen you provide label sets as examples of truth, Glue machine learning uses some of\n those examples to learn from them. The rest of the labels are used as a test to estimate\n quality.
\n\t\n\tReturns a unique identifier for the run. You can call GetMLTaskRun
to get more\n information about the stats of the EvaluationTaskRun
.
Starts the active learning workflow for your machine learning transform to improve the\n transform's quality by generating label sets and adding labels.
\nWhen the StartMLLabelingSetGenerationTaskRun
finishes, Glue will have\n generated a \"labeling set\" or a set of questions for humans to answer.
In the case of the FindMatches
transform, these questions are of the form,\n “What is the correct way to group these rows together into groups composed entirely of\n matching records?”
After the labeling process is finished, you can upload your labels with a call to\n StartImportLabelsTaskRun
. After StartImportLabelsTaskRun
finishes,\n all future runs of the machine learning transform will use the new and improved labels and\n perform a higher-quality transformation.
Starts the active learning workflow for your machine learning transform to improve the\n transform's quality by generating label sets and adding labels.
\n\t\n\tWhen the StartMLLabelingSetGenerationTaskRun
finishes, Glue will have\n generated a \"labeling set\" or a set of questions for humans to answer.
In the case of the FindMatches
transform, these questions are of the form,\n “What is the correct way to group these rows together into groups composed entirely of\n matching records?”
After the labeling process is finished, you can upload your labels with a call to\n StartImportLabelsTaskRun
. After StartImportLabelsTaskRun
finishes,\n all future runs of the machine learning transform will use the new and improved labels and\n perform a higher-quality transformation.
An object that references a schema stored in the Glue Schema Registry.
\nWhen creating a table, you can pass an empty list of columns for the schema, and instead use a schema reference.
" + "smithy.api#documentation": "An object that references a schema stored in the Glue Schema Registry.
\n\t\n\tWhen creating a table, you can pass an empty list of columns for the schema, and instead use a schema reference.
" } } }, @@ -28999,7 +29002,7 @@ "PartitionKeys": { "target": "com.amazonaws.glue#ColumnList", "traits": { - "smithy.api#documentation": "A list of columns by which the table is partitioned. Only primitive\n types are supported as partition keys.
\nWhen you create a table used by Amazon Athena, and you do not specify any\n partitionKeys
, you must at least set the value of partitionKeys
to\n an empty list. For example:
\n \"PartitionKeys\": []
\n
A list of columns by which the table is partitioned. Only primitive\n types are supported as partition keys.
\n\tWhen you create a table used by Amazon Athena, and you do not specify any\n partitionKeys
, you must at least set the value of partitionKeys
to\n an empty list. For example:
\n \"PartitionKeys\": []
\n
A list of columns by which the table is partitioned. Only primitive\n types are supported as partition keys.
\nWhen you create a table used by Amazon Athena, and you do not specify any\n partitionKeys
, you must at least set the value of partitionKeys
to\n an empty list. For example:
\n \"PartitionKeys\": []
\n
A list of columns by which the table is partitioned. Only primitive\n types are supported as partition keys.
\n\tWhen you create a table used by Amazon Athena, and you do not specify any\n partitionKeys
, you must at least set the value of partitionKeys
to\n an empty list. For example:
\n \"PartitionKeys\": []
\n
The encryption-at-rest settings of the transform that apply to accessing user data. Machine learning transforms can access user data encrypted in Amazon S3 using KMS.
\nAdditionally, imported labels and trained transforms can now be encrypted using a customer provided KMS key.
" + "smithy.api#documentation": "The encryption-at-rest settings of the transform that apply to accessing user data. Machine learning transforms can access user data encrypted in Amazon S3 using KMS.
\n \n\tAdditionally, imported labels and trained transforms can now be encrypted using a customer provided KMS key.
" } }, "com.amazonaws.glue#TransformFilterCriteria": { @@ -29886,7 +29889,7 @@ "TransformType": { "target": "com.amazonaws.glue#TransformType", "traits": { - "smithy.api#documentation": "The type of machine learning transform.
\nFor information about the types of machine learning transforms, see Creating Machine Learning Transforms.
", + "smithy.api#documentation": "The type of machine learning transform.
\n\tFor information about the types of machine learning transforms, see Creating Machine Learning Transforms.
", "smithy.api#required": {} } }, @@ -30299,7 +30302,7 @@ "UnionType": { "target": "com.amazonaws.glue#UnionType", "traits": { - "smithy.api#documentation": "Indicates the type of Union transform.
\nSpecify ALL
to join all rows from data sources to the resulting DynamicFrame. The resulting union does not remove duplicate rows.
Specify DISTINCT
to remove duplicate rows in the resulting DynamicFrame.
Indicates the type of Union transform.
\n\t\n\tSpecify ALL
to join all rows from data sources to the resulting DynamicFrame. The resulting union does not remove duplicate rows.
Specify DISTINCT
to remove duplicate rows in the resulting DynamicFrame.
Creates or updates partition statistics of columns.
\nThe Identity and Access Management (IAM) permission required for this operation is UpdatePartition
.
Creates or updates partition statistics of columns.
\n\t\nThe Identity and Access Management (IAM) permission required for this operation is UpdatePartition
.
Creates or updates table statistics of columns.
\nThe Identity and Access Management (IAM) permission required for this operation is UpdateTable
.
Creates or updates table statistics of columns.
\n\t\n\tThe Identity and Access Management (IAM) permission required for this operation is UpdateTable
.
The map of arguments to add to the map of arguments used to configure the\n DevEndpoint
.
Valid arguments are:
\n\n \"--enable-glue-datacatalog\": \"\"
\n
You can specify a version of Python support for development endpoints by using the Arguments
parameter in the CreateDevEndpoint
or UpdateDevEndpoint
APIs. If no arguments are provided, the version defaults to Python 2.
The map of arguments to add to the map of arguments used to configure the\n DevEndpoint
.
Valid arguments are:
\n\t\n \"--enable-glue-datacatalog\": \"\"
\n
You can specify a version of Python support for development endpoints by using the Arguments
parameter in the CreateDevEndpoint
or UpdateDevEndpoint
APIs. If no arguments are provided, the version defaults to Python 2.
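As a hedged example of the argument map above (operation and member names assumed from the SDK's codegen conventions):

import aws.sdk.kotlin.services.glue.GlueClient

// Sketch: enable the Data Catalog as the Hive metastore on an existing dev endpoint.
suspend fun enableCatalog(endpoint: String) {
    GlueClient.fromEnvironment { }.use { glue ->
        glue.updateDevEndpoint {
            endpointName = endpoint
            addArguments = mapOf("--enable-glue-datacatalog" to "")
        }
    }
}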
Synchronizes a job from the source control repository. This operation takes the job artifacts that are located in the remote repository and updates the Glue internal stores with these artifacts.
\nThis API supports optional parameters which take in the repository information.
" + "smithy.api#documentation": "Synchronizes a job from the source control repository. This operation takes the job artifacts that are located in the remote repository and updates the Glue internal stores with these artifacts.
\n\t\n\tThis API supports optional parameters which take in the repository information.
" } }, "com.amazonaws.glue#UpdateJobFromSourceControlRequest": { @@ -31471,7 +31474,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates an existing machine learning transform. Call this operation to tune the algorithm parameters to achieve better results.
\nAfter calling this operation, you can call the StartMLEvaluationTaskRun
\n operation to assess how well your new parameters achieved your goals (such as improving the\n quality of your machine learning transform, or making it more cost-effective).
Updates an existing machine learning transform. Call this operation to tune the algorithm parameters to achieve better results.
\n\t\nAfter calling this operation, you can call the StartMLEvaluationTaskRun
\n operation to assess how well your new parameters achieved your goals (such as improving the\n quality of your machine learning transform, or making it more cost-effective).
The number of Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of\n processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more\n information, see the Glue pricing\n page.
\nWhen the WorkerType
field is set to a value other than Standard
, the MaxCapacity
field is set automatically and becomes read-only.
The number of Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of\n processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more\n information, see the Glue pricing\n page.
\n\t\t\nWhen the WorkerType
field is set to a value other than Standard
, the MaxCapacity
field is set automatically and becomes read-only.
The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.
\nFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X
worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
The type of predefined worker that is allocated when this task runs. Accepts a value of Standard, G.1X, or G.2X.
\n\tFor the Standard
worker type, each worker provides 4 vCPU, 16 GB of memory and a 50GB disk, and 2 executors per worker.
For the G.1X
worker type, each worker provides 4 vCPU, 16 GB of memory and a 64GB disk, and 1 executor per worker.
For the G.2X
worker type, each worker provides 8 vCPU, 32 GB of memory and a 128GB disk, and 1 executor per worker.
The new partition object to update the partition to.
\nThe Values
property can't be changed. If you want to change the partition key values for a partition, delete and recreate the partition.
The new partition object to update the partition to.
\n\t\n\tThe Values
property can't be changed. If you want to change the partition key values for a partition, delete and recreate the partition.
Updates the description, compatibility setting, or version checkpoint for a schema set.
\nFor updating the compatibility setting, the call will not validate compatibility for the entire set of schema versions with the new compatibility setting. If the value for Compatibility
is provided, the VersionNumber
(a checkpoint) is also required. The API will validate the checkpoint version number for consistency.
If the value for the VersionNumber
(checkpoint) is provided, Compatibility
is optional and this can be used to set/reset a checkpoint for the schema.
This update will happen only if the schema is in the AVAILABLE state.
" + "smithy.api#documentation": "Updates the description, compatibility setting, or version checkpoint for a schema set.
\n\tFor updating the compatibility setting, the call will not validate compatibility for the entire set of schema versions with the new compatibility setting. If the value for Compatibility
is provided, the VersionNumber
(a checkpoint) is also required. The API will validate the checkpoint version number for consistency.
If the value for the VersionNumber
(checkpoint) is provided, Compatibility
is optional and this can be used to set/reset a checkpoint for the schema.
This update will happen only if the schema is in the AVAILABLE state.
" } }, "com.amazonaws.glue#UpdateSchemaInput": { @@ -31729,7 +31732,7 @@ "SchemaId": { "target": "com.amazonaws.glue#SchemaId", "traits": { - "smithy.api#documentation": "This is a wrapper structure to contain schema identity fields. The structure contains:
\nSchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. One of SchemaArn
or SchemaName
has to be provided.
SchemaId$SchemaName: The name of the schema. One of SchemaArn
or SchemaName
has to be provided.
This is a wrapper structure to contain schema identity fields. The structure contains:
\n\tSchemaId$SchemaArn: The Amazon Resource Name (ARN) of the schema. One of SchemaArn
or SchemaName
has to be provided.
SchemaId$SchemaName: The name of the schema. One of SchemaArn
or SchemaName
has to be provided.
Synchronizes a job to the source control repository. This operation takes the job artifacts from the Glue internal stores and makes a commit to the remote repository that is configured on the job.
\nThis API supports optional parameters which take in the repository information.
" + "smithy.api#documentation": "Synchronizes a job to the source control repository. This operation takes the job artifacts from the Glue internal stores and makes a commit to the remote repository that is configured on the job.
\n\n\tThis API supports optional parameters which take in the repository information.
" } }, "com.amazonaws.glue#UpdateSourceControlFromJobRequest": { diff --git a/codegen/sdk/aws-models/grafana.json b/codegen/sdk/aws-models/grafana.json index e5c7b4f5a79..7b127840365 100644 --- a/codegen/sdk/aws-models/grafana.json +++ b/codegen/sdk/aws-models/grafana.json @@ -3908,4 +3908,4 @@ } } } -} +} \ No newline at end of file diff --git a/codegen/sdk/aws-models/groundstation.json b/codegen/sdk/aws-models/groundstation.json index 0c3c487a010..615d64cd552 100644 --- a/codegen/sdk/aws-models/groundstation.json +++ b/codegen/sdk/aws-models/groundstation.json @@ -5539,4 +5539,4 @@ } } } -} +} \ No newline at end of file diff --git a/codegen/sdk/aws-models/guardduty.json b/codegen/sdk/aws-models/guardduty.json index c3e1591561c..4aea93404c2 100644 --- a/codegen/sdk/aws-models/guardduty.json +++ b/codegen/sdk/aws-models/guardduty.json @@ -1084,7 +1084,7 @@ "Name": { "target": "com.amazonaws.guardduty#FilterName", "traits": { - "smithy.api#documentation": "The name of the filter. Valid characters include period (.), underscore (_), \n dash (-), and alphanumeric characters. A whitespace is considered to be an invalid character.
", + "smithy.api#documentation": "The name of the filter. Minimum length of 3. Maximum length of 64. Valid characters include alphanumeric characters, dot (.), underscore (_), and dash (-). Spaces are not allowed.
", "smithy.api#jsonName": "name", "smithy.api#required": {} } @@ -1092,7 +1092,7 @@ "Description": { "target": "com.amazonaws.guardduty#FilterDescription", "traits": { - "smithy.api#documentation": "The description of the filter. Valid special characters include period (.), underscore (_), dash (-), \n and whitespace. The new line character is considered to be an invalid input for description.
", + "smithy.api#documentation": "The description of the filter.
", "smithy.api#jsonName": "description" } }, @@ -4977,14 +4977,14 @@ "name": "guardduty" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "Amazon GuardDuty is a continuous security monitoring service that analyzes and processes\n the following data sources: VPC flow logs, Amazon Web Services CloudTrail management event logs, CloudTrail S3 data event\n logs, EKS audit logs, DNS logs, and Amazon EBS volume data.\n It uses threat intelligence\n feeds, such as lists of malicious IPs and domains, and machine learning to identify\n unexpected, potentially unauthorized, and malicious activity within your Amazon Web Services environment.\n This can include issues like escalations of privileges, uses of exposed credentials, or\n communication with malicious IPs, domains, or presence of malware on your \n Amazon EC2 instances and container workloads. For example, GuardDuty can detect\n compromised EC2 instances and container workloads serving malware, or mining bitcoin.
\nGuardDuty also monitors Amazon Web Services account access behavior for signs of compromise, such \n as unauthorized infrastructure deployments like EC2 instances deployed in a Region\n that has never been used, or unusual API calls like a password policy change to reduce\n password strength.
\nGuardDuty informs you about the status of your Amazon Web Services environment by producing \n security findings that you can view in the GuardDuty console or through Amazon EventBridge. \n For more information, see the \n Amazon\n GuardDuty User Guide\n .
", + "smithy.api#documentation": "Amazon GuardDuty is a continuous security monitoring service that analyzes and processes\n the following data sources: VPC flow logs, Amazon Web Services CloudTrail management event logs, CloudTrail S3 data event\n logs, EKS audit logs, and DNS logs.\n It uses threat intelligence\n feeds (such as lists of malicious IPs and domains) and machine learning to identify\n unexpected, potentially unauthorized, and malicious activity within your Amazon Web Services environment.\n This can include issues like escalations of privileges, uses of exposed credentials, or\n communication with malicious IPs, URLs, or domains. For example, GuardDuty can detect\n compromised EC2 instances that serve malware or mine bitcoin.
\nGuardDuty also monitors Amazon Web Services account access behavior for signs of compromise. Some examples\n of this are unauthorized infrastructure deployments such as EC2 instances deployed in a Region\n that has never been used, or unusual API calls like a password policy change to reduce\n password strength.
\nGuardDuty informs you of the status of your Amazon Web Services environment by producing security findings\n that you can view in the GuardDuty console or through Amazon CloudWatch events. For more\n information, see the \n Amazon\n GuardDuty User Guide\n .
", "smithy.api#title": "Amazon GuardDuty", "smithy.rules#endpointRuleSet": { "version": "1.0", "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -5033,6 +5033,15 @@ "ref": "Endpoint" } ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" } ], "type": "tree", @@ -5320,9 +5329,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-south-1", "UseDualStack": true, - "Region": "ap-south-1" + "UseFIPS": true } }, { @@ -5333,9 +5342,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-south-1", "UseDualStack": false, - "Region": "ap-south-1" + "UseFIPS": true } }, { @@ -5346,9 +5355,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-south-1", "UseDualStack": true, - "Region": "ap-south-1" + "UseFIPS": false } }, { @@ -5359,9 +5368,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-south-1", "UseDualStack": false, - "Region": "ap-south-1" + "UseFIPS": false } }, { @@ -5372,9 +5381,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-south-1", "UseDualStack": true, - "Region": "eu-south-1" + "UseFIPS": true } }, { @@ -5385,9 +5394,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-south-1", "UseDualStack": false, - "Region": "eu-south-1" + "UseFIPS": true } }, { @@ -5398,9 +5407,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-south-1", "UseDualStack": true, - "Region": "eu-south-1" + "UseFIPS": false } }, { @@ -5411,9 +5420,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-south-1", "UseDualStack": false, - "Region": "eu-south-1" + "UseFIPS": false } }, { @@ -5424,9 +5433,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-gov-east-1", "UseDualStack": true, - "Region": "us-gov-east-1" + "UseFIPS": true } }, { @@ -5437,9 +5446,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-gov-east-1", "UseDualStack": false, - "Region": "us-gov-east-1" + "UseFIPS": true } }, { @@ -5450,9 +5459,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-gov-east-1", "UseDualStack": true, - "Region": "us-gov-east-1" + "UseFIPS": false } }, { @@ -5463,9 +5472,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-gov-east-1", "UseDualStack": false, - "Region": "us-gov-east-1" + "UseFIPS": false } }, { @@ -5476,9 +5485,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "me-central-1", "UseDualStack": true, - "Region": "me-central-1" + "UseFIPS": true } }, { @@ -5489,9 +5498,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "me-central-1", "UseDualStack": false, - "Region": "me-central-1" + "UseFIPS": true } }, { @@ -5502,9 +5511,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "me-central-1", "UseDualStack": true, - "Region": "me-central-1" + "UseFIPS": false } }, { @@ -5515,9 +5524,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "me-central-1", "UseDualStack": false, - "Region": "me-central-1" + "UseFIPS": false } }, { @@ -5528,9 +5537,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ca-central-1", "UseDualStack": true, - "Region": "ca-central-1" + "UseFIPS": true } }, { @@ -5541,9 +5550,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ca-central-1", "UseDualStack": false, - "Region": "ca-central-1" + "UseFIPS": true } }, { @@ -5554,9 +5563,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ca-central-1", "UseDualStack": true, - "Region": "ca-central-1" + "UseFIPS": false } }, { @@ -5567,9 +5576,9 @@ } }, "params": { - "UseFIPS": false, + "Region": 
"ca-central-1", "UseDualStack": false, - "Region": "ca-central-1" + "UseFIPS": false } }, { @@ -5580,9 +5589,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-central-1", "UseDualStack": true, - "Region": "eu-central-1" + "UseFIPS": true } }, { @@ -5593,9 +5602,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-central-1", "UseDualStack": false, - "Region": "eu-central-1" + "UseFIPS": true } }, { @@ -5606,9 +5615,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-central-1", "UseDualStack": true, - "Region": "eu-central-1" + "UseFIPS": false } }, { @@ -5619,61 +5628,9 @@ } }, "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-central-1" - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://guardduty-fips.eu-central-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-central-2" - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://guardduty-fips.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-central-2" - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://guardduty.eu-central-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-central-2" - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://guardduty.eu-central-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, + "Region": "eu-central-1", "UseDualStack": false, - "Region": "eu-central-2" + "UseFIPS": false } }, { @@ -5684,9 +5641,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-west-1", "UseDualStack": true, - "Region": "us-west-1" + "UseFIPS": true } }, { @@ -5697,9 +5654,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-west-1", "UseDualStack": false, - "Region": "us-west-1" + "UseFIPS": true } }, { @@ -5710,9 +5667,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-west-1", "UseDualStack": true, - "Region": "us-west-1" + "UseFIPS": false } }, { @@ -5723,9 +5680,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-west-1", "UseDualStack": false, - "Region": "us-west-1" + "UseFIPS": false } }, { @@ -5736,9 +5693,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-west-2", "UseDualStack": true, - "Region": "us-west-2" + "UseFIPS": true } }, { @@ -5749,9 +5706,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-west-2", "UseDualStack": false, - "Region": "us-west-2" + "UseFIPS": true } }, { @@ -5762,9 +5719,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-west-2", "UseDualStack": true, - "Region": "us-west-2" + "UseFIPS": false } }, { @@ -5775,9 +5732,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-west-2", "UseDualStack": false, - "Region": "us-west-2" + "UseFIPS": false } }, { @@ -5788,9 +5745,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "af-south-1", "UseDualStack": true, - "Region": "af-south-1" + "UseFIPS": true } }, { @@ -5801,9 +5758,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "af-south-1", "UseDualStack": false, - "Region": "af-south-1" + "UseFIPS": true } }, { @@ -5814,9 +5771,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "af-south-1", "UseDualStack": true, - "Region": "af-south-1" + 
"UseFIPS": false } }, { @@ -5827,9 +5784,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "af-south-1", "UseDualStack": false, - "Region": "af-south-1" + "UseFIPS": false } }, { @@ -5840,9 +5797,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-north-1", "UseDualStack": true, - "Region": "eu-north-1" + "UseFIPS": true } }, { @@ -5853,9 +5810,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-north-1", "UseDualStack": false, - "Region": "eu-north-1" + "UseFIPS": true } }, { @@ -5866,9 +5823,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-north-1", "UseDualStack": true, - "Region": "eu-north-1" + "UseFIPS": false } }, { @@ -5879,9 +5836,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-north-1", "UseDualStack": false, - "Region": "eu-north-1" + "UseFIPS": false } }, { @@ -5892,9 +5849,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-west-3", "UseDualStack": true, - "Region": "eu-west-3" + "UseFIPS": true } }, { @@ -5905,9 +5862,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-west-3", "UseDualStack": false, - "Region": "eu-west-3" + "UseFIPS": true } }, { @@ -5918,9 +5875,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-west-3", "UseDualStack": true, - "Region": "eu-west-3" + "UseFIPS": false } }, { @@ -5931,9 +5888,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-west-3", "UseDualStack": false, - "Region": "eu-west-3" + "UseFIPS": false } }, { @@ -5944,9 +5901,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-west-2", "UseDualStack": true, - "Region": "eu-west-2" + "UseFIPS": true } }, { @@ -5957,9 +5914,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-west-2", "UseDualStack": false, - "Region": "eu-west-2" + "UseFIPS": true } }, { @@ -5970,9 +5927,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-west-2", "UseDualStack": true, - "Region": "eu-west-2" + "UseFIPS": false } }, { @@ -5983,9 +5940,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-west-2", "UseDualStack": false, - "Region": "eu-west-2" + "UseFIPS": false } }, { @@ -5996,9 +5953,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-west-1", "UseDualStack": true, - "Region": "eu-west-1" + "UseFIPS": true } }, { @@ -6009,9 +5966,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-west-1", "UseDualStack": false, - "Region": "eu-west-1" + "UseFIPS": true } }, { @@ -6022,9 +5979,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-west-1", "UseDualStack": true, - "Region": "eu-west-1" + "UseFIPS": false } }, { @@ -6035,9 +5992,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-west-1", "UseDualStack": false, - "Region": "eu-west-1" + "UseFIPS": false } }, { @@ -6048,9 +6005,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-northeast-3", "UseDualStack": true, - "Region": "ap-northeast-3" + "UseFIPS": true } }, { @@ -6061,9 +6018,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-northeast-3", "UseDualStack": false, - "Region": "ap-northeast-3" + "UseFIPS": true } }, { @@ -6074,9 +6031,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-northeast-3", "UseDualStack": true, - "Region": "ap-northeast-3" + "UseFIPS": false } }, { @@ -6087,9 +6044,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-northeast-3", "UseDualStack": false, - "Region": "ap-northeast-3" + "UseFIPS": false } }, { @@ -6100,9 +6057,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-northeast-2", "UseDualStack": true, - "Region": "ap-northeast-2" + "UseFIPS": true } }, { @@ -6113,9 +6070,9 @@ } }, "params": { - 
"UseFIPS": true, + "Region": "ap-northeast-2", "UseDualStack": false, - "Region": "ap-northeast-2" + "UseFIPS": true } }, { @@ -6126,9 +6083,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-northeast-2", "UseDualStack": true, - "Region": "ap-northeast-2" + "UseFIPS": false } }, { @@ -6139,9 +6096,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-northeast-2", "UseDualStack": false, - "Region": "ap-northeast-2" + "UseFIPS": false } }, { @@ -6152,9 +6109,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-northeast-1", "UseDualStack": true, - "Region": "ap-northeast-1" + "UseFIPS": true } }, { @@ -6165,9 +6122,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-northeast-1", "UseDualStack": false, - "Region": "ap-northeast-1" + "UseFIPS": true } }, { @@ -6178,9 +6135,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-northeast-1", "UseDualStack": true, - "Region": "ap-northeast-1" + "UseFIPS": false } }, { @@ -6191,9 +6148,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-northeast-1", "UseDualStack": false, - "Region": "ap-northeast-1" + "UseFIPS": false } }, { @@ -6204,9 +6161,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "me-south-1", "UseDualStack": true, - "Region": "me-south-1" + "UseFIPS": true } }, { @@ -6217,9 +6174,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "me-south-1", "UseDualStack": false, - "Region": "me-south-1" + "UseFIPS": true } }, { @@ -6230,9 +6187,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "me-south-1", "UseDualStack": true, - "Region": "me-south-1" + "UseFIPS": false } }, { @@ -6243,9 +6200,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "me-south-1", "UseDualStack": false, - "Region": "me-south-1" + "UseFIPS": false } }, { @@ -6256,9 +6213,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "sa-east-1", "UseDualStack": true, - "Region": "sa-east-1" + "UseFIPS": true } }, { @@ -6269,9 +6226,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "sa-east-1", "UseDualStack": false, - "Region": "sa-east-1" + "UseFIPS": true } }, { @@ -6282,9 +6239,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "sa-east-1", "UseDualStack": true, - "Region": "sa-east-1" + "UseFIPS": false } }, { @@ -6295,9 +6252,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "sa-east-1", "UseDualStack": false, - "Region": "sa-east-1" + "UseFIPS": false } }, { @@ -6308,9 +6265,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-east-1", "UseDualStack": true, - "Region": "ap-east-1" + "UseFIPS": true } }, { @@ -6321,9 +6278,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-east-1", "UseDualStack": false, - "Region": "ap-east-1" + "UseFIPS": true } }, { @@ -6334,9 +6291,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-east-1", "UseDualStack": true, - "Region": "ap-east-1" + "UseFIPS": false } }, { @@ -6347,9 +6304,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-east-1", "UseDualStack": false, - "Region": "ap-east-1" + "UseFIPS": false } }, { @@ -6360,9 +6317,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "cn-north-1", "UseDualStack": true, - "Region": "cn-north-1" + "UseFIPS": true } }, { @@ -6373,9 +6330,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "cn-north-1", "UseDualStack": false, - "Region": "cn-north-1" + "UseFIPS": true } }, { @@ -6386,9 +6343,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "cn-north-1", "UseDualStack": true, - "Region": "cn-north-1" + "UseFIPS": false } }, { @@ -6399,9 +6356,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "cn-north-1", 
"UseDualStack": false, - "Region": "cn-north-1" + "UseFIPS": false } }, { @@ -6412,9 +6369,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-gov-west-1", "UseDualStack": true, - "Region": "us-gov-west-1" + "UseFIPS": true } }, { @@ -6425,9 +6382,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-gov-west-1", "UseDualStack": false, - "Region": "us-gov-west-1" + "UseFIPS": true } }, { @@ -6438,9 +6395,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-gov-west-1", "UseDualStack": true, - "Region": "us-gov-west-1" + "UseFIPS": false } }, { @@ -6451,9 +6408,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-gov-west-1", "UseDualStack": false, - "Region": "us-gov-west-1" + "UseFIPS": false } }, { @@ -6464,9 +6421,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-southeast-1", "UseDualStack": true, - "Region": "ap-southeast-1" + "UseFIPS": true } }, { @@ -6477,9 +6434,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-southeast-1", "UseDualStack": false, - "Region": "ap-southeast-1" + "UseFIPS": true } }, { @@ -6490,9 +6447,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-southeast-1", "UseDualStack": true, - "Region": "ap-southeast-1" + "UseFIPS": false } }, { @@ -6503,9 +6460,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-southeast-1", "UseDualStack": false, - "Region": "ap-southeast-1" + "UseFIPS": false } }, { @@ -6516,9 +6473,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-southeast-2", "UseDualStack": true, - "Region": "ap-southeast-2" + "UseFIPS": true } }, { @@ -6529,9 +6486,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-southeast-2", "UseDualStack": false, - "Region": "ap-southeast-2" + "UseFIPS": true } }, { @@ -6542,9 +6499,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-southeast-2", "UseDualStack": true, - "Region": "ap-southeast-2" + "UseFIPS": false } }, { @@ -6555,9 +6512,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-southeast-2", "UseDualStack": false, - "Region": "ap-southeast-2" + "UseFIPS": false } }, { @@ -6566,9 +6523,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, + "Region": "us-iso-east-1", "UseDualStack": true, - "Region": "us-iso-east-1" + "UseFIPS": true } }, { @@ -6579,9 +6536,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-iso-east-1", "UseDualStack": false, - "Region": "us-iso-east-1" + "UseFIPS": true } }, { @@ -6590,9 +6547,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, + "Region": "us-iso-east-1", "UseDualStack": true, - "Region": "us-iso-east-1" + "UseFIPS": false } }, { @@ -6603,9 +6560,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-iso-east-1", "UseDualStack": false, - "Region": "us-iso-east-1" + "UseFIPS": false } }, { @@ -6616,9 +6573,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-southeast-3", "UseDualStack": true, - "Region": "ap-southeast-3" + "UseFIPS": true } }, { @@ -6629,9 +6586,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-southeast-3", "UseDualStack": false, - "Region": "ap-southeast-3" + "UseFIPS": true } }, { @@ -6642,9 +6599,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-southeast-3", "UseDualStack": true, - "Region": "ap-southeast-3" + "UseFIPS": false } }, { @@ -6655,9 +6612,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-southeast-3", "UseDualStack": false, - "Region": "ap-southeast-3" + "UseFIPS": false } }, { @@ -6668,9 +6625,9 @@ 
} }, "params": { - "UseFIPS": true, + "Region": "us-east-1", "UseDualStack": true, - "Region": "us-east-1" + "UseFIPS": true } }, { @@ -6681,9 +6638,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-east-1", "UseDualStack": false, - "Region": "us-east-1" + "UseFIPS": true } }, { @@ -6694,9 +6651,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-east-1", "UseDualStack": true, - "Region": "us-east-1" + "UseFIPS": false } }, { @@ -6707,9 +6664,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-east-1", "UseDualStack": false, - "Region": "us-east-1" + "UseFIPS": false } }, { @@ -6720,9 +6677,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-east-2", "UseDualStack": true, - "Region": "us-east-2" + "UseFIPS": true } }, { @@ -6733,9 +6690,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-east-2", "UseDualStack": false, - "Region": "us-east-2" + "UseFIPS": true } }, { @@ -6746,9 +6703,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-east-2", "UseDualStack": true, - "Region": "us-east-2" + "UseFIPS": false } }, { @@ -6759,9 +6716,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-east-2", "UseDualStack": false, - "Region": "us-east-2" + "UseFIPS": false } }, { @@ -6772,9 +6729,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "cn-northwest-1", "UseDualStack": true, - "Region": "cn-northwest-1" + "UseFIPS": true } }, { @@ -6785,9 +6742,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "cn-northwest-1", "UseDualStack": false, - "Region": "cn-northwest-1" + "UseFIPS": true } }, { @@ -6798,9 +6755,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "cn-northwest-1", "UseDualStack": true, - "Region": "cn-northwest-1" + "UseFIPS": false } }, { @@ -6811,9 +6768,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "cn-northwest-1", "UseDualStack": false, - "Region": "cn-northwest-1" + "UseFIPS": false } }, { @@ -6822,9 +6779,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, + "Region": "us-isob-east-1", "UseDualStack": true, - "Region": "us-isob-east-1" + "UseFIPS": true } }, { @@ -6835,9 +6792,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-isob-east-1", "UseDualStack": false, - "Region": "us-isob-east-1" + "UseFIPS": true } }, { @@ -6846,9 +6803,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, + "Region": "us-isob-east-1", "UseDualStack": true, - "Region": "us-isob-east-1" + "UseFIPS": false } }, { @@ -6859,9 +6816,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-isob-east-1", "UseDualStack": false, - "Region": "us-isob-east-1" + "UseFIPS": false } }, { @@ -6872,9 +6829,9 @@ } }, "params": { - "UseFIPS": false, - "UseDualStack": false, "Region": "us-east-1", + "UseDualStack": false, + "UseFIPS": false, "Endpoint": "https://example.com" } }, @@ -6884,9 +6841,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, - "UseDualStack": false, "Region": "us-east-1", + "UseDualStack": false, + "UseFIPS": true, "Endpoint": "https://example.com" } }, @@ -6896,9 +6853,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, - "UseDualStack": true, "Region": "us-east-1", + "UseDualStack": true, + "UseFIPS": false, "Endpoint": "https://example.com" } } @@ -9701,7 +9658,7 @@ "TriggerDetails": { "target": "com.amazonaws.guardduty#TriggerDetails", "traits": { - 
"smithy.api#documentation": "Specifies the reason why the scan was initiated.
", + "smithy.api#documentation": "Represents the reason the scan was triggered.
", "smithy.api#jsonName": "triggerDetails" } }, @@ -11044,7 +11001,7 @@ "Description": { "target": "com.amazonaws.guardduty#FilterDescription", "traits": { - "smithy.api#documentation": "The description of the filter. Valid special characters include period (.), underscore (_), dash (-), \n and whitespace. The new line character is considered to be an invalid input for description.
", + "smithy.api#documentation": "The description of the filter.
", "smithy.api#jsonName": "description" } }, diff --git a/codegen/sdk/aws-models/inspector2.json b/codegen/sdk/aws-models/inspector2.json index 7bc89e257f3..e9d4ccb039f 100644 --- a/codegen/sdk/aws-models/inspector2.json +++ b/codegen/sdk/aws-models/inspector2.json @@ -9523,4 +9523,4 @@ } } } -} +} \ No newline at end of file diff --git a/codegen/sdk/aws-models/iot-roborunner.json b/codegen/sdk/aws-models/iot-roborunner.json index d0bb8dcaa73..0ea847d9c80 100644 --- a/codegen/sdk/aws-models/iot-roborunner.json +++ b/codegen/sdk/aws-models/iot-roborunner.json @@ -3263,4 +3263,4 @@ } } } -} +} \ No newline at end of file diff --git a/codegen/sdk/aws-models/iotfleetwise.json b/codegen/sdk/aws-models/iotfleetwise.json index e309cefb94c..eb8b1a6937d 100644 --- a/codegen/sdk/aws-models/iotfleetwise.json +++ b/codegen/sdk/aws-models/iotfleetwise.json @@ -2614,9 +2614,6 @@ }, { "target": "com.amazonaws.iotfleetwise#ThrottlingException" - }, - { - "target": "com.amazonaws.iotfleetwise#ValidationException" } ], "traits": { @@ -3568,6 +3565,15 @@ "ref": "Endpoint" } ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" } ], "type": "tree", @@ -3830,9 +3836,9 @@ } }, "params": { - "Region": "eu-central-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-central-1" } }, { @@ -3843,9 +3849,9 @@ } }, "params": { - "Region": "eu-central-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-central-1" } }, { @@ -3856,9 +3862,9 @@ } }, "params": { - "Region": "eu-central-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-central-1" } }, { @@ -3869,9 +3875,9 @@ } }, "params": { - "Region": "eu-central-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-central-1" } }, { @@ -3882,9 +3888,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-east-1" } }, { @@ -3895,9 +3901,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-east-1" } }, { @@ -3908,9 +3914,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-east-1" } }, { @@ -3921,9 +3927,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-east-1" } }, { @@ -3934,9 +3940,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": false, "UseFIPS": false, + "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -3946,9 +3952,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "Region": "us-east-1", "UseDualStack": false, "UseFIPS": true, + "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -3958,9 +3964,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "Region": "us-east-1", "UseDualStack": true, "UseFIPS": false, + "Region": "us-east-1", "Endpoint": "https://example.com" } } @@ -6980,9 +6986,6 @@ }, { "target": "com.amazonaws.iotfleetwise#ThrottlingException" - }, - { - "target": "com.amazonaws.iotfleetwise#ValidationException" } ], "traits": { @@ -7790,4 +7793,4 @@ } } } -} +} \ No newline at end of file diff --git a/codegen/sdk/aws-models/iottwinmaker.json b/codegen/sdk/aws-models/iottwinmaker.json index 9f7c3ac10d9..2514619517d 100644 --- a/codegen/sdk/aws-models/iottwinmaker.json +++ 
b/codegen/sdk/aws-models/iottwinmaker.json @@ -6787,4 +6787,4 @@ } } } -} +} \ No newline at end of file diff --git a/codegen/sdk/aws-models/ivschat.json b/codegen/sdk/aws-models/ivschat.json index ce9b1da8a3c..ee086fe97e8 100644 --- a/codegen/sdk/aws-models/ivschat.json +++ b/codegen/sdk/aws-models/ivschat.json @@ -3132,4 +3132,4 @@ } } } -} +} \ No newline at end of file diff --git a/codegen/sdk/aws-models/kendra.json b/codegen/sdk/aws-models/kendra.json index f549c467df0..d4aa2c4f838 100644 --- a/codegen/sdk/aws-models/kendra.json +++ b/codegen/sdk/aws-models/kendra.json @@ -9252,7 +9252,7 @@ "type": "float", "traits": { "smithy.api#range": { - "min": 1.0E-6, + "min": 0.000001, "max": 50 } } diff --git a/codegen/sdk/aws-models/kinesis-video.json b/codegen/sdk/aws-models/kinesis-video.json index 3584812a671..41e9837a328 100644 --- a/codegen/sdk/aws-models/kinesis-video.json +++ b/codegen/sdk/aws-models/kinesis-video.json @@ -221,12 +221,6 @@ "traits": { "smithy.api#enumValue": "HTTPS" } - }, - "WEBRTC": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "WEBRTC" - } } } }, @@ -799,148 +793,6 @@ } } }, - "com.amazonaws.kinesisvideo#DescribeMappedResourceConfiguration": { - "type": "operation", - "input": { - "target": "com.amazonaws.kinesisvideo#DescribeMappedResourceConfigurationInput" - }, - "output": { - "target": "com.amazonaws.kinesisvideo#DescribeMappedResourceConfigurationOutput" - }, - "errors": [ - { - "target": "com.amazonaws.kinesisvideo#AccessDeniedException" - }, - { - "target": "com.amazonaws.kinesisvideo#ClientLimitExceededException" - }, - { - "target": "com.amazonaws.kinesisvideo#InvalidArgumentException" - }, - { - "target": "com.amazonaws.kinesisvideo#ResourceNotFoundException" - } - ], - "traits": { - "smithy.api#documentation": "Returns the most current information about the stream. Either streamName or streamARN should be provided in the input.
\nReturns the most current information about the stream. The streamName
\n or streamARN
should be provided in the input.
The name of the stream.
" - } - }, - "StreamARN": { - "target": "com.amazonaws.kinesisvideo#ResourceARN", - "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the stream.
" - } - }, - "MaxResults": { - "target": "com.amazonaws.kinesisvideo#MappedResourceConfigurationListLimit", - "traits": { - "smithy.api#documentation": "The maximum number of results to return in the response.
" - } - }, - "NextToken": { - "target": "com.amazonaws.kinesisvideo#NextToken", - "traits": { - "smithy.api#documentation": "The token to provide in your next request, to get another batch of results.
" - } - } - } - }, - "com.amazonaws.kinesisvideo#DescribeMappedResourceConfigurationOutput": { - "type": "structure", - "members": { - "MappedResourceConfigurationList": { - "target": "com.amazonaws.kinesisvideo#MappedResourceConfigurationList", - "traits": { - "smithy.api#documentation": "A structure that encapsulates, or contains, the media storage configuration properties.
" - } - }, - "NextToken": { - "target": "com.amazonaws.kinesisvideo#NextToken", - "traits": { - "smithy.api#documentation": "The token that was used in the NextToken
request to fetch the next set of results.
Returns the most current information about the channel. Specify the ChannelName
\n or ChannelARN
in the input.
The name of the channel.
" - } - }, - "ChannelARN": { - "target": "com.amazonaws.kinesisvideo#ResourceARN", - "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the channel.
" - } - } - } - }, - "com.amazonaws.kinesisvideo#DescribeMediaStorageConfigurationOutput": { - "type": "structure", - "members": { - "MediaStorageConfiguration": { - "target": "com.amazonaws.kinesisvideo#MediaStorageConfiguration", - "traits": { - "smithy.api#documentation": "A structure that encapsulates, or contains, the media storage configuration properties.
" - } - } - } - }, "com.amazonaws.kinesisvideo#DescribeNotificationConfiguration": { "type": "operation", "input": { @@ -1190,7 +1042,7 @@ "UploaderConfig": { "target": "com.amazonaws.kinesisvideo#UploaderConfig", "traits": { - "smithy.api#documentation": "The uploader configuration contains the ScheduleExpression
details that are used to\n schedule upload jobs for the recorded media files from the Edge Agent to a Kinesis Video Stream.
The uploader configuration contains the ScheduleExpression
details that are used, to\n schedule upload jobs for the recorded media files from the Edge Agent, to a Kinesis Video Stream.
The configuration details that include the maximum size of the media \n (MaxLocalMediaSizeInMB
) that you want to\n store for a stream on the Edge Agent, as well as the strategy that should be used (StrategyOnFullSize
) when a stream's\n maximum size has been reached.
The type of the associated resource for the kinesis video stream.
" - } - }, - "ARN": { - "target": "com.amazonaws.kinesisvideo#ResourceARN", - "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Kinesis Video Stream resource, associated with the stream.
" - } - } - }, - "traits": { - "smithy.api#documentation": "A structure that encapsulates, or contains, the media storage configuration properties.
" - } - }, - "com.amazonaws.kinesisvideo#MappedResourceConfigurationListLimit": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 1, - "max": 1 - } - } - }, "com.amazonaws.kinesisvideo#MaxLocalMediaSizeInMB": { "type": "integer", "traits": { @@ -3547,7 +3453,7 @@ "MediaUriType": { "target": "com.amazonaws.kinesisvideo#MediaUriType", "traits": { - "smithy.api#documentation": "The Uniform Resource Identifier (URI) type. The FILE_URI
value can be used to stream \n local media files.
Preview only supports the RTSP_URI
media source URI format .
The Uniform Resource Identifier (Uri) type. The FILE_URI
value can be used to stream \n local media files.
The configuration details that consist of the credentials required \n (MediaUriSecretArn
and MediaUriType
) to access the media files that are \n streamed to the camera.
The Amazon Resource Name (ARN) of the stream
" - } - }, - "Status": { - "target": "com.amazonaws.kinesisvideo#MediaStorageConfigurationStatus", - "traits": { - "smithy.api#documentation": "The status of the media storage configuration.
", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "A structure that encapsulates, or contains, the media storage configuration properties.
" - } - }, - "com.amazonaws.kinesisvideo#MediaStorageConfigurationStatus": { - "type": "enum", - "members": { - "ENABLED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ENABLED" - } - }, - "DISABLED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DISABLED" - } - } - } - }, "com.amazonaws.kinesisvideo#MediaType": { "type": "string", "traits": { @@ -3782,7 +3650,7 @@ } }, "traits": { - "smithy.api#documentation": "When the input StreamARN
or ChannelARN
\n in CLOUD_STORAGE_MODE
is already mapped to a different\n Kinesis Video Stream resource, or if the provided input StreamARN
\n or ChannelARN
is not in Active status, try one of the following :
The DescribeMediaStorageConfiguration
API to determine what the stream given channel is mapped to. \n
The DescribeMappedResourceConfiguration
API to determine the channel that the given stream is mapped to. \n
The DescribeStream
or DescribeSignalingChannel
API to determine the status of the resource.\n
The resource is currently not available for this operation. New resources cannot be\n created with the same name as existing resources. Also, resources cannot be updated or\n deleted unless they are in an ACTIVE
state.
If this exception is returned, do not use it to determine whether the requested\n resource already exists. Instead, it is recommended you use the resource-specific\n describe API, for example, DescribeStream
for video streams.
An asynchronous API that updates a stream’s existing edge configuration. \n The Kinesis Video Stream will sync the stream’s edge configuration with the Edge Agent IoT Greengrass \n component that runs on an IoT Hub Device, setup at your premise. The time to sync can vary\n and depends on the connectivity of the Hub Device. \n The SyncStatus
will be updated as the edge configuration is acknowledged, \n and synced with the Edge Agent.
If this API is invoked for the first time, a new edge configuration will be created for the stream,\n and the sync status will be set to SYNCING
. You will have to wait for the sync status\n to reach a terminal state such as: IN_SYNC
, or SYNC_FAILED
, before using this API again. \n If you invoke this API during the syncing process, a ResourceInUseException
will be thrown. \n The connectivity of the stream’s edge configuration and the Edge Agent will be retried for 15 minutes. After 15 minutes,\n the status will transition into the SYNC_FAILED
state.
An asynchronous API that updates a stream’s existing edge configuration. If this API is invoked for the\n first time, a new edge configuration will be created for the stream, and the sync status will be set to \n SYNCING
.
The Kinesis Video Stream will sync the stream’s edge configuration with the Edge Agent \n IoT Greengrass component that runs on an IoT Hub Device setup at your premise. \n The time to sync can vary and depends on the connectivity of the Hub Device. \n The SyncStatus
will be updated as the edge configuration is acknowledged, \n and synced with the Edge Agent. You will have to wait for the sync status to reach a terminal state such as:\n IN_SYNC
and SYNC_FAILED
, before using this API again.
If you invoke this API during the syncing process, \n a ResourceInUseException
will be thrown. The connectivity of the stream's edge configuration\n and the Edge Agent will be retried \n for 15 minutes. After 15 minutes, the status will transition into the SYNC_FAILED
state.\n
Associates a SignalingChannel
to a stream to store the media. There are two signaling modes that \n can specified :
If the StorageStatus
is disabled, no data will be stored,\n and the StreamARN
parameter will not be needed.
If the StorageStatus
is enabled, the data will be stored in the \n StreamARN
provided.
The Amazon Resource Name (ARN) of the channel.
", - "smithy.api#required": {} - } - }, - "MediaStorageConfiguration": { - "target": "com.amazonaws.kinesisvideo#MediaStorageConfiguration", - "traits": { - "smithy.api#documentation": "A structure that encapsulates, or contains, the media storage configuration properties.
", - "smithy.api#required": {} - } - } - } - }, - "com.amazonaws.kinesisvideo#UpdateMediaStorageConfigurationOutput": { - "type": "structure", - "members": {} - }, "com.amazonaws.kinesisvideo#UpdateNotificationConfiguration": { "type": "operation", "input": { @@ -4970,7 +4775,7 @@ } }, "traits": { - "smithy.api#documentation": "The configuration that consists of the ScheduleExpression
\n and the DurationInMinutesdetails
, that specify the scheduling to record from a camera,\n or local media file, onto the Edge Agent. If the ScheduleExpression
\n is not provided, then the Edge Agent will always be in upload mode.\n
The configuration that consists of the ScheduleConfig
attribute that's required, to schedule the jobs \n to upload the recorded media files onto the Edge Agent in a Kinesis Video Stream.\n
Specifies that you do not have the permissions required to perform this\n operation.
", - "smithy.api#error": "client" - } - }, "com.amazonaws.kinesis#AddTagsToStream": { "type": "operation", "input": { @@ -50,9 +38,6 @@ "target": "smithy.api#Unit" }, "errors": [ - { - "target": "com.amazonaws.kinesis#AccessDeniedException" - }, { "target": "com.amazonaws.kinesis#InvalidArgumentException" }, @@ -67,12 +52,7 @@ } ], "traits": { - "smithy.api#documentation": "Adds or updates tags for the specified Kinesis data stream. You can assign up to 50\n tags to a data stream.
\nWhen invoking this API, it is recommended you use the StreamARN
input\n parameter rather than the StreamName
input parameter.
If tags have already been assigned to the stream, AddTagsToStream
\n overwrites any existing tags that correspond to the specified tag keys.
\n AddTagsToStream has a limit of five transactions per second per\n account.
", - "smithy.rules#staticContextParams": { - "OperationType": { - "value": "control" - } - } + "smithy.api#documentation": "Adds or updates tags for the specified Kinesis data stream. You can assign up to 50\n tags to a data stream.
\nIf tags have already been assigned to the stream, AddTagsToStream
\n overwrites any existing tags that correspond to the specified tag keys.
\n AddTagsToStream has a limit of five transactions per second per\n account.
" } }, "com.amazonaws.kinesis#AddTagsToStreamInput": { @@ -81,7 +61,8 @@ "StreamName": { "target": "com.amazonaws.kinesis#StreamName", "traits": { - "smithy.api#documentation": "The name of the stream.
" + "smithy.api#documentation": "The name of the stream.
", + "smithy.api#required": {} } }, "Tags": { @@ -90,15 +71,6 @@ "smithy.api#documentation": "A set of up to 10 key-value pairs to use to create the tags.
", "smithy.api#required": {} } - }, - "StreamARN": { - "target": "com.amazonaws.kinesis#StreamARN", - "traits": { - "smithy.api#documentation": "The ARN of the stream.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } - } } }, "traits": { @@ -155,7 +127,7 @@ "ConsumerARN": { "target": "com.amazonaws.kinesis#ConsumerARN", "traits": { - "smithy.api#documentation": "When you register a consumer, Kinesis Data Streams generates an ARN for it. You need\n this ARN to be able to call SubscribeToShard.
\nIf you delete a consumer and then create a new one with the same name, it won't have\n the same ARN. That's because consumer ARNs contain the creation timestamp. This is\n important to keep in mind if you have IAM policies that reference consumer ARNs.
", + "smithy.api#documentation": "When you register a consumer, Kinesis Data Streams generates an ARN for it. You need\n this ARN to be able to call SubscribeToShard.
\nIf you delete a consumer and then create a new one with the same name, it won't have\n the same ARN. That's because consumer ARNs contain the creation timestamp. This is\n important to keep in mind if you have IAM policies that reference consumer ARNs.
", "smithy.api#required": {} } }, @@ -210,7 +182,7 @@ "ConsumerARN": { "target": "com.amazonaws.kinesis#ConsumerARN", "traits": { - "smithy.api#documentation": "When you register a consumer, Kinesis Data Streams generates an ARN for it. You need\n this ARN to be able to call SubscribeToShard.
\nIf you delete a consumer and then create a new one with the same name, it won't have\n the same ARN. That's because consumer ARNs contain the creation timestamp. This is\n important to keep in mind if you have IAM policies that reference consumer ARNs.
", + "smithy.api#documentation": "When you register a consumer, Kinesis Data Streams generates an ARN for it. You need\n this ARN to be able to call SubscribeToShard.
\nIf you delete a consumer and then create a new one with the same name, it won't have\n the same ARN. That's because consumer ARNs contain the creation timestamp. This is\n important to keep in mind if you have IAM policies that reference consumer ARNs.
", "smithy.api#required": {} } }, @@ -257,26 +229,22 @@ } }, "com.amazonaws.kinesis#ConsumerStatus": { - "type": "enum", - "members": { - "CREATING": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CREATING" - } - }, - "DELETING": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DELETING" - } - }, - "ACTIVE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ACTIVE" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CREATING", + "name": "CREATING" + }, + { + "value": "DELETING", + "name": "DELETING" + }, + { + "value": "ACTIVE", + "name": "ACTIVE" } - } + ] } }, "com.amazonaws.kinesis#CreateStream": { @@ -299,7 +267,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a Kinesis data stream. A stream captures and transports data records that are\n continuously emitted from different data sources or producers.\n Scale-out within a stream is explicitly supported by means of shards, which are uniquely\n identified groups of data records in a stream.
\nYou can create your data stream using either on-demand or provisioned capacity mode.\n Data streams with an on-demand mode require no capacity planning and automatically scale\n to handle gigabytes of write and read throughput per minute. With the on-demand mode,\n Kinesis Data Streams automatically manages the shards in order to provide the necessary\n throughput. For the data streams with a provisioned mode, you must specify the number of\n shards for the data stream. Each shard can support reads up to five transactions per\n second, up to a maximum data read total of 2 MiB per second. Each shard can support\n writes up to 1,000 records per second, up to a maximum data write total of 1 MiB per\n second. If the amount of data input increases or decreases, you can add or remove\n shards.
\nThe stream name identifies the stream. The name is scoped to the Amazon Web Services\n account used by the application. It is also scoped by Amazon Web Services Region. That\n is, two streams in two different accounts can have the same name, and two streams in the\n same account, but in two different Regions, can have the same name.
\n\n CreateStream
is an asynchronous operation. Upon receiving a\n CreateStream
request, Kinesis Data Streams immediately returns and sets\n the stream status to CREATING
. After the stream is created, Kinesis Data\n Streams sets the stream status to ACTIVE
. You should perform read and write\n operations only on an ACTIVE
stream.
You receive a LimitExceededException
when making a\n CreateStream
request when you try to do one of the following:
Have more than five streams in the CREATING
state at any point in\n time.
Create more shards than are authorized for your account.
\nFor the default shard limit for an Amazon Web Services account, see Amazon\n Kinesis Data Streams Limits in the Amazon Kinesis Data Streams\n Developer Guide. To increase this limit, contact Amazon Web Services\n Support.
\nYou can use DescribeStreamSummary to check the stream status, which\n is returned in StreamStatus
.
\n CreateStream has a limit of five transactions per second per\n account.
" + "smithy.api#documentation": "Creates a Kinesis data stream. A stream captures and transports data records that are\n continuously emitted from different data sources or producers.\n Scale-out within a stream is explicitly supported by means of shards, which are uniquely\n identified groups of data records in a stream.
\nYou specify and control the number of shards that a stream is composed of. Each shard\n can support reads up to five transactions per second, up to a maximum data read total of\n 2 MiB per second. Each shard can support writes up to 1,000 records per second, up to a\n maximum data write total of 1 MiB per second. If the amount of data input increases or\n decreases, you can add or remove shards.
\nThe stream name identifies the stream. The name is scoped to the Amazon Web Services\n account used by the application. It is also scoped by Amazon Web Services Region. That\n is, two streams in two different accounts can have the same name, and two streams in the\n same account, but in two different Regions, can have the same name.
\n\n CreateStream
is an asynchronous operation. Upon receiving a\n CreateStream
request, Kinesis Data Streams immediately returns and sets\n the stream status to CREATING
. After the stream is created, Kinesis Data\n Streams sets the stream status to ACTIVE
. You should perform read and write\n operations only on an ACTIVE
stream.
You receive a LimitExceededException
when making a\n CreateStream
request when you try to do one of the following:
Have more than five streams in the CREATING
state at any point in\n time.
Create more shards than are authorized for your account.
\nFor the default shard limit for an Amazon Web Services account, see Amazon\n Kinesis Data Streams Limits in the Amazon Kinesis Data Streams\n Developer Guide. To increase this limit, contact Amazon Web Services\n Support.
\nYou can use DescribeStreamSummary to check the stream status, which\n is returned in StreamStatus
.
\n CreateStream has a limit of five transactions per second per\n account.
" } }, "com.amazonaws.kinesis#CreateStreamInput": { @@ -347,9 +315,6 @@ "target": "smithy.api#Unit" }, "errors": [ - { - "target": "com.amazonaws.kinesis#AccessDeniedException" - }, { "target": "com.amazonaws.kinesis#InvalidArgumentException" }, @@ -364,12 +329,7 @@ } ], "traits": { - "smithy.api#documentation": "Decreases the Kinesis data stream's retention period, which is the length of time data\n records are accessible after they are added to the stream. The minimum value of a\n stream's retention period is 24 hours.
\nWhen invoking this API, it is recommended you use the StreamARN
input\n parameter rather than the StreamName
input parameter.
This operation may result in lost data. For example, if the stream's retention period\n is 48 hours and is decreased to 24 hours, any data already in the stream that is older\n than 24 hours is inaccessible.
", - "smithy.rules#staticContextParams": { - "OperationType": { - "value": "control" - } - } + "smithy.api#documentation": "Decreases the Kinesis data stream's retention period, which is the length of time data\n records are accessible after they are added to the stream. The minimum value of a\n stream's retention period is 24 hours.
\nThis operation may result in lost data. For example, if the stream's retention period\n is 48 hours and is decreased to 24 hours, any data already in the stream that is older\n than 24 hours is inaccessible.
" } }, "com.amazonaws.kinesis#DecreaseStreamRetentionPeriodInput": { @@ -378,7 +338,8 @@ "StreamName": { "target": "com.amazonaws.kinesis#StreamName", "traits": { - "smithy.api#documentation": "The name of the stream to modify.
" + "smithy.api#documentation": "The name of the stream to modify.
", + "smithy.api#required": {} } }, "RetentionPeriodHours": { @@ -387,15 +348,6 @@ "smithy.api#documentation": "The new retention period of the stream, in hours. Must be less than the current\n retention period.
", "smithy.api#required": {} } - }, - "StreamARN": { - "target": "com.amazonaws.kinesis#StreamARN", - "traits": { - "smithy.api#documentation": "The ARN of the stream.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } - } } }, "traits": { @@ -411,12 +363,6 @@ "target": "smithy.api#Unit" }, "errors": [ - { - "target": "com.amazonaws.kinesis#AccessDeniedException" - }, - { - "target": "com.amazonaws.kinesis#InvalidArgumentException" - }, { "target": "com.amazonaws.kinesis#LimitExceededException" }, @@ -428,12 +374,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes a Kinesis data stream and all its shards and data. You must shut down any\n applications that are operating on the stream before you delete the stream. If an\n application attempts to operate on a deleted stream, it receives the exception\n ResourceNotFoundException
.
When invoking this API, it is recommended you use the StreamARN
input\n parameter rather than the StreamName
input parameter.
If the stream is in the ACTIVE
state, you can delete it. After a\n DeleteStream
request, the specified stream is in the\n DELETING
state until Kinesis Data Streams completes the\n deletion.
\n Note: Kinesis Data Streams might continue to accept\n data read and write operations, such as PutRecord, PutRecords, and GetRecords, on a stream in the\n DELETING
state until the stream deletion is complete.
When you delete a stream, any shards in that stream are also deleted, and any tags are\n dissociated from the stream.
\nYou can use the DescribeStreamSummary operation to check the state\n of the stream, which is returned in StreamStatus
.
\n DeleteStream has a limit of five transactions per second per\n account.
", - "smithy.rules#staticContextParams": { - "OperationType": { - "value": "control" - } - } + "smithy.api#documentation": "Deletes a Kinesis data stream and all its shards and data. You must shut down any\n applications that are operating on the stream before you delete the stream. If an\n application attempts to operate on a deleted stream, it receives the exception\n ResourceNotFoundException
.
If the stream is in the ACTIVE
state, you can delete it. After a\n DeleteStream
request, the specified stream is in the\n DELETING
state until Kinesis Data Streams completes the\n deletion.
\n Note: Kinesis Data Streams might continue to accept\n data read and write operations, such as PutRecord, PutRecords, and GetRecords, on a stream in the\n DELETING
state until the stream deletion is complete.
When you delete a stream, any shards in that stream are also deleted, and any tags are\n dissociated from the stream.
\nYou can use the DescribeStreamSummary operation to check the state\n of the stream, which is returned in StreamStatus
.
\n DeleteStream has a limit of five transactions per second per\n account.
" } }, "com.amazonaws.kinesis#DeleteStreamInput": { @@ -442,7 +383,8 @@ "StreamName": { "target": "com.amazonaws.kinesis#StreamName", "traits": { - "smithy.api#documentation": "The name of the stream to delete.
" + "smithy.api#documentation": "The name of the stream to delete.
", + "smithy.api#required": {} } }, "EnforceConsumerDeletion": { @@ -450,15 +392,6 @@ "traits": { "smithy.api#documentation": "If this parameter is unset (null
) or if you set it to false
,\n and the stream has registered consumers, the call to DeleteStream
fails\n with a ResourceInUseException
.
The ARN of the stream.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } - } } }, "traits": { @@ -485,12 +418,7 @@ } ], "traits": { - "smithy.api#documentation": "To deregister a consumer, provide its ARN. Alternatively, you can provide the ARN of\n the data stream and the name you gave the consumer when you registered it. You may also\n provide all three parameters, as long as they don't conflict with each other. If you\n don't know the name or ARN of the consumer that you want to deregister, you can use the\n ListStreamConsumers operation to get a list of the descriptions of\n all the consumers that are currently registered with a given data stream. The\n description of a consumer contains its name and ARN.
\nThis operation has a limit of five transactions per second per stream.
", - "smithy.rules#staticContextParams": { - "OperationType": { - "value": "control" - } - } + "smithy.api#documentation": "To deregister a consumer, provide its ARN. Alternatively, you can provide the ARN of\n the data stream and the name you gave the consumer when you registered it. You may also\n provide all three parameters, as long as they don't conflict with each other. If you\n don't know the name or ARN of the consumer that you want to deregister, you can use the\n ListStreamConsumers operation to get a list of the descriptions of\n all the consumers that are currently registered with a given data stream. The\n description of a consumer contains its name and ARN.
\nThis operation has a limit of five transactions per second per stream.
" } }, "com.amazonaws.kinesis#DeregisterStreamConsumerInput": { @@ -499,10 +427,7 @@ "StreamARN": { "target": "com.amazonaws.kinesis#StreamARN", "traits": { - "smithy.api#documentation": "The ARN of the Kinesis data stream that the consumer is registered with. For more\n information, see Amazon Resource Names (ARNs) and Amazon Web Services Service\n Namespaces.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } + "smithy.api#documentation": "The ARN of the Kinesis data stream that the consumer is registered with. For more\n information, see Amazon Resource Names (ARNs) and Amazon Web Services Service\n Namespaces.
" } }, "ConsumerName": { @@ -514,10 +439,7 @@ "ConsumerARN": { "target": "com.amazonaws.kinesis#ConsumerARN", "traits": { - "smithy.api#documentation": "The ARN returned by Kinesis Data Streams when you registered the consumer. If you\n don't know the ARN of the consumer that you want to deregister, you can use the\n ListStreamConsumers operation to get a list of the descriptions of all the consumers\n that are currently registered with a given data stream. The description of a consumer\n contains its ARN.
", - "smithy.rules#contextParam": { - "name": "ConsumerARN" - } + "smithy.api#documentation": "The ARN returned by Kinesis Data Streams when you registered the consumer. If you\n don't know the ARN of the consumer that you want to deregister, you can use the\n ListStreamConsumers operation to get a list of the descriptions of all the consumers\n that are currently registered with a given data stream. The description of a consumer\n contains its ARN.
" } } } @@ -536,7 +458,7 @@ } ], "traits": { - "smithy.api#documentation": "Describes the shard limits and usage for the account.
\nIf you update your account limits, the old limits might be returned for a few\n minutes.
\nThis operation has a limit of one transaction per second per account.
" + "smithy.api#documentation": "Describes the shard limits and usage for the account.
\nIf you update your account limits, the old limits might be returned for a few\n minutes.
\nThis operation has a limit of one transaction per second per account.
" } }, "com.amazonaws.kinesis#DescribeLimitsInput": { @@ -585,12 +507,6 @@ "target": "com.amazonaws.kinesis#DescribeStreamOutput" }, "errors": [ - { - "target": "com.amazonaws.kinesis#AccessDeniedException" - }, - { - "target": "com.amazonaws.kinesis#InvalidArgumentException" - }, { "target": "com.amazonaws.kinesis#LimitExceededException" }, @@ -599,12 +515,7 @@ } ], "traits": { - "smithy.api#documentation": "Describes the specified Kinesis data stream.
\nThis API has been revised. It's highly recommended that you use the DescribeStreamSummary API to get a summarized description of the\n specified Kinesis data stream and the ListShards API to list the\n shards in a specified data stream and obtain information about each shard.
\nWhen invoking this API, it is recommended you use the StreamARN
input\n parameter rather than the StreamName
input parameter.
The information returned includes the stream name, Amazon Resource Name (ARN),\n creation time, enhanced metric configuration, and shard map. The shard map is an array\n of shard objects. For each shard object, there is the hash key and sequence number\n ranges that the shard spans, and the IDs of any earlier shards that played in a role in\n creating the shard. Every record ingested in the stream is identified by a sequence\n number, which is assigned when the record is put into the stream.
\nYou can limit the number of shards returned by each call. For more information, see\n Retrieving\n Shards from a Stream in the Amazon Kinesis Data Streams Developer\n Guide.
\nThere are no guarantees about the chronological order shards returned. To process\n shards in chronological order, use the ID of the parent shard to track the lineage to\n the oldest shard.
\nThis operation has a limit of 10 transactions per second per account.
", - "smithy.rules#staticContextParams": { - "OperationType": { - "value": "control" - } - }, + "smithy.api#documentation": "Describes the specified Kinesis data stream.
\nThis API has been revised. It's highly recommended that you use the DescribeStreamSummary API to get a summarized description of the\n specified Kinesis data stream and the ListShards API to list the\n shards in a specified data stream and obtain information about each shard.
\nThe information returned includes the stream name, Amazon Resource Name (ARN),\n creation time, enhanced metric configuration, and shard map. The shard map is an array\n of shard objects. For each shard object, there is the hash key and sequence number\n ranges that the shard spans, and the IDs of any earlier shards that played in a role in\n creating the shard. Every record ingested in the stream is identified by a sequence\n number, which is assigned when the record is put into the stream.
\n\nYou can limit the number of shards returned by each call. For more information, see\n Retrieving\n Shards from a Stream in the Amazon Kinesis Data Streams Developer\n Guide.
\nThere are no guarantees about the chronological order shards returned. To process\n shards in chronological order, use the ID of the parent shard to track the lineage to\n the oldest shard.
\nThis operation has a limit of 10 transactions per second per account.
", "smithy.waiters#waitable": { "StreamExists": { "acceptors": [ @@ -655,12 +566,7 @@ } ], "traits": { - "smithy.api#documentation": "To get the description of a registered consumer, provide the ARN of the consumer.\n Alternatively, you can provide the ARN of the data stream and the name you gave the\n consumer when you registered it. You may also provide all three parameters, as long as\n they don't conflict with each other. If you don't know the name or ARN of the consumer\n that you want to describe, you can use the ListStreamConsumers\n operation to get a list of the descriptions of all the consumers that are currently\n registered with a given data stream.
\nThis operation has a limit of 20 transactions per second per stream.
", - "smithy.rules#staticContextParams": { - "OperationType": { - "value": "control" - } - } + "smithy.api#documentation": "To get the description of a registered consumer, provide the ARN of the consumer.\n Alternatively, you can provide the ARN of the data stream and the name you gave the\n consumer when you registered it. You may also provide all three parameters, as long as\n they don't conflict with each other. If you don't know the name or ARN of the consumer\n that you want to describe, you can use the ListStreamConsumers\n operation to get a list of the descriptions of all the consumers that are currently\n registered with a given data stream.
\nThis operation has a limit of 20 transactions per second per stream.
" } }, "com.amazonaws.kinesis#DescribeStreamConsumerInput": { @@ -669,10 +575,7 @@ "StreamARN": { "target": "com.amazonaws.kinesis#StreamARN", "traits": { - "smithy.api#documentation": "The ARN of the Kinesis data stream that the consumer is registered with. For more\n information, see Amazon Resource Names (ARNs) and Amazon Web Services Service\n Namespaces.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } + "smithy.api#documentation": "The ARN of the Kinesis data stream that the consumer is registered with. For more\n information, see Amazon Resource Names (ARNs) and Amazon Web Services Service\n Namespaces.
" } }, "ConsumerName": { @@ -684,10 +587,7 @@ "ConsumerARN": { "target": "com.amazonaws.kinesis#ConsumerARN", "traits": { - "smithy.api#documentation": "The ARN returned by Kinesis Data Streams when you registered the consumer.
", - "smithy.rules#contextParam": { - "name": "ConsumerARN" - } + "smithy.api#documentation": "The ARN returned by Kinesis Data Streams when you registered the consumer.
" } } } @@ -710,7 +610,8 @@ "StreamName": { "target": "com.amazonaws.kinesis#StreamName", "traits": { - "smithy.api#documentation": "The name of the stream to describe.
" + "smithy.api#documentation": "The name of the stream to describe.
", + "smithy.api#required": {} } }, "Limit": { @@ -722,16 +623,7 @@ "ExclusiveStartShardId": { "target": "com.amazonaws.kinesis#ShardId", "traits": { - "smithy.api#documentation": "The shard ID of the shard to start with.
\nSpecify this parameter to indicate that you want to describe the stream starting with\n the shard whose ID immediately follows ExclusiveStartShardId
.
If you don't specify this parameter, the default behavior for\n DescribeStream
is to describe the stream starting with the first shard\n in the stream.
The ARN of the stream.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } + "smithy.api#documentation": "The shard ID of the shard to start with.
\nSpecify this parameter to indicate that you want to describe the stream starting with\n the shard whose ID immediately follows ExclusiveStartShardId
.
If you don't specify this parameter, the default behavior for\n DescribeStream
is to describe the stream starting with the first shard\n in the stream.
Provides a summarized description of the specified Kinesis data stream without the\n shard list.
\nWhen invoking this API, it is recommended you use the StreamARN
input\n parameter rather than the StreamName
input parameter.
The information returned includes the stream name, Amazon Resource Name (ARN), status,\n record retention period, approximate creation time, monitoring, encryption details, and\n open shard count.
\n\n DescribeStreamSummary has a limit of 20 transactions per second per\n account.
", - "smithy.rules#staticContextParams": { - "OperationType": { - "value": "control" - } - } + "smithy.api#documentation": "Provides a summarized description of the specified Kinesis data stream without the\n shard list.
\nThe information returned includes the stream name, Amazon Resource Name (ARN), status,\n record retention period, approximate creation time, monitoring, encryption details, and\n open shard count.
\n\n DescribeStreamSummary has a limit of 20 transactions per second per\n account.
" } }, "com.amazonaws.kinesis#DescribeStreamSummaryInput": { @@ -800,16 +681,8 @@ "StreamName": { "target": "com.amazonaws.kinesis#StreamName", "traits": { - "smithy.api#documentation": "The name of the stream to describe.
" - } - }, - "StreamARN": { - "target": "com.amazonaws.kinesis#StreamARN", - "traits": { - "smithy.api#documentation": "The ARN of the stream.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } + "smithy.api#documentation": "The name of the stream to describe.
", + "smithy.api#required": {} } } } @@ -835,9 +708,6 @@ "target": "com.amazonaws.kinesis#EnhancedMonitoringOutput" }, "errors": [ - { - "target": "com.amazonaws.kinesis#AccessDeniedException" - }, { "target": "com.amazonaws.kinesis#InvalidArgumentException" }, @@ -852,12 +722,7 @@ } ], "traits": { - "smithy.api#documentation": "Disables enhanced monitoring.
\nWhen invoking this API, it is recommended you use the StreamARN
input\n parameter rather than the StreamName
input parameter.
Disables enhanced monitoring.
" } }, "com.amazonaws.kinesis#DisableEnhancedMonitoringInput": { @@ -866,24 +731,16 @@ "StreamName": { "target": "com.amazonaws.kinesis#StreamName", "traits": { - "smithy.api#documentation": "The name of the Kinesis data stream for which to disable enhanced monitoring.
" + "smithy.api#documentation": "The name of the Kinesis data stream for which to disable enhanced monitoring.
", + "smithy.api#required": {} } }, "ShardLevelMetrics": { "target": "com.amazonaws.kinesis#MetricsNameList", "traits": { - "smithy.api#documentation": "List of shard-level metrics to disable.
\nThe following are the valid shard-level metrics. The value \"ALL
\" disables\n every metric.
\n IncomingBytes
\n
\n IncomingRecords
\n
\n OutgoingBytes
\n
\n OutgoingRecords
\n
\n WriteProvisionedThroughputExceeded
\n
\n ReadProvisionedThroughputExceeded
\n
\n IteratorAgeMilliseconds
\n
\n ALL
\n
For more information, see Monitoring the Amazon\n Kinesis Data Streams Service with Amazon CloudWatch in the Amazon\n Kinesis Data Streams Developer Guide.
", + "smithy.api#documentation": "List of shard-level metrics to disable.
\nThe following are the valid shard-level metrics. The value \"ALL
\" disables\n every metric.
\n IncomingBytes
\n
\n IncomingRecords
\n
\n OutgoingBytes
\n
\n OutgoingRecords
\n
\n WriteProvisionedThroughputExceeded
\n
\n ReadProvisionedThroughputExceeded
\n
\n IteratorAgeMilliseconds
\n
\n ALL
\n
For more information, see Monitoring the Amazon\n Kinesis Data Streams Service with Amazon CloudWatch in the Amazon\n Kinesis Data Streams Developer Guide.
", "smithy.api#required": {} } - }, - "StreamARN": { - "target": "com.amazonaws.kinesis#StreamARN", - "traits": { - "smithy.api#documentation": "The ARN of the stream.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } - } } }, "traits": { @@ -899,9 +756,6 @@ "target": "com.amazonaws.kinesis#EnhancedMonitoringOutput" }, "errors": [ - { - "target": "com.amazonaws.kinesis#AccessDeniedException" - }, { "target": "com.amazonaws.kinesis#InvalidArgumentException" }, @@ -916,12 +770,7 @@ } ], "traits": { - "smithy.api#documentation": "Enables enhanced Kinesis data stream monitoring for shard-level metrics.
\nWhen invoking this API, it is recommended you use the StreamARN
input\n parameter rather than the StreamName
input parameter.
Enables enhanced Kinesis data stream monitoring for shard-level metrics.
" } }, "com.amazonaws.kinesis#EnableEnhancedMonitoringInput": { @@ -930,24 +779,16 @@ "StreamName": { "target": "com.amazonaws.kinesis#StreamName", "traits": { - "smithy.api#documentation": "The name of the stream for which to enable enhanced monitoring.
" + "smithy.api#documentation": "The name of the stream for which to enable enhanced monitoring.
", + "smithy.api#required": {} } }, "ShardLevelMetrics": { "target": "com.amazonaws.kinesis#MetricsNameList", "traits": { - "smithy.api#documentation": "List of shard-level metrics to enable.
\nThe following are the valid shard-level metrics. The value \"ALL
\" enables\n every metric.
\n IncomingBytes
\n
\n IncomingRecords
\n
\n OutgoingBytes
\n
\n OutgoingRecords
\n
\n WriteProvisionedThroughputExceeded
\n
\n ReadProvisionedThroughputExceeded
\n
\n IteratorAgeMilliseconds
\n
\n ALL
\n
For more information, see Monitoring the Amazon\n Kinesis Data Streams Service with Amazon CloudWatch in the Amazon\n Kinesis Data Streams Developer Guide.
", + "smithy.api#documentation": "List of shard-level metrics to enable.
\nThe following are the valid shard-level metrics. The value \"ALL
\" enables\n every metric.
\n IncomingBytes
\n
\n IncomingRecords
\n
\n OutgoingBytes
\n
\n OutgoingRecords
\n
\n WriteProvisionedThroughputExceeded
\n
\n ReadProvisionedThroughputExceeded
\n
\n IteratorAgeMilliseconds
\n
\n ALL
\n
For more information, see Monitoring the Amazon\n Kinesis Data Streams Service with Amazon CloudWatch in the Amazon\n Kinesis Data Streams Developer Guide.
", "smithy.api#required": {} } - }, - "StreamARN": { - "target": "com.amazonaws.kinesis#StreamARN", - "traits": { - "smithy.api#documentation": "The ARN of the stream.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } - } } }, "traits": { @@ -955,20 +796,18 @@ } }, "com.amazonaws.kinesis#EncryptionType": { - "type": "enum", - "members": { - "NONE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NONE" - } - }, - "KMS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "KMS" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "NONE", + "name": "NONE" + }, + { + "value": "KMS", + "name": "KMS" } - } + ] } }, "com.amazonaws.kinesis#EnhancedMetrics": { @@ -977,7 +816,7 @@ "ShardLevelMetrics": { "target": "com.amazonaws.kinesis#MetricsNameList", "traits": { - "smithy.api#documentation": "List of shard-level metrics.
\nThe following are the valid shard-level metrics. The value \"ALL
\" enhances\n every metric.
\n IncomingBytes
\n
\n IncomingRecords
\n
\n OutgoingBytes
\n
\n OutgoingRecords
\n
\n WriteProvisionedThroughputExceeded
\n
\n ReadProvisionedThroughputExceeded
\n
\n IteratorAgeMilliseconds
\n
\n ALL
\n
For more information, see Monitoring the Amazon\n Kinesis Data Streams Service with Amazon CloudWatch in the Amazon\n Kinesis Data Streams Developer Guide.
" + "smithy.api#documentation": "List of shard-level metrics.
\nThe following are the valid shard-level metrics. The value \"ALL
\" enhances\n every metric.
\n IncomingBytes
\n
\n IncomingRecords
\n
\n OutgoingBytes
\n
\n OutgoingRecords
\n
\n WriteProvisionedThroughputExceeded
\n
\n ReadProvisionedThroughputExceeded
\n
\n IteratorAgeMilliseconds
\n
\n ALL
\n
For more information, see Monitoring the Amazon\n Kinesis Data Streams Service with Amazon CloudWatch in the Amazon\n Kinesis Data Streams Developer Guide.
" } } }, @@ -1011,12 +850,6 @@ "traits": { "smithy.api#documentation": "Represents the list of all the metrics that would be in the enhanced state after the\n operation.
" } - }, - "StreamARN": { - "target": "com.amazonaws.kinesis#StreamARN", - "traits": { - "smithy.api#documentation": "The ARN of the stream.
" - } } }, "traits": { @@ -1065,9 +898,6 @@ "target": "com.amazonaws.kinesis#GetRecordsOutput" }, "errors": [ - { - "target": "com.amazonaws.kinesis#AccessDeniedException" - }, { "target": "com.amazonaws.kinesis#ExpiredIteratorException" }, @@ -1100,12 +930,7 @@ } ], "traits": { - "smithy.api#documentation": "Gets data records from a Kinesis data stream's shard.
\nWhen invoking this API, it is recommended you use the StreamARN
input\n parameter in addition to the ShardIterator
parameter.
Specify a shard iterator using the ShardIterator
parameter. The shard\n iterator specifies the position in the shard from which you want to start reading data\n records sequentially. If there are no records available in the portion of the shard that\n the iterator points to, GetRecords returns an empty list. It might\n take multiple calls to get to a portion of the shard that contains records.
You can scale by provisioning multiple shards per stream while considering service\n limits (for more information, see Amazon Kinesis Data Streams\n Limits in the Amazon Kinesis Data Streams Developer\n Guide). Your application should have one thread per shard, each reading\n continuously from its stream. To read from a stream continually, call GetRecords in a loop. Use GetShardIterator to get the\n shard iterator to specify in the first GetRecords call. GetRecords returns a new shard iterator in\n NextShardIterator
. Specify the shard iterator returned in\n NextShardIterator
in subsequent calls to GetRecords.\n If the shard has been closed, the shard iterator can't return more data and GetRecords returns null
in NextShardIterator
.\n You can terminate the loop when the shard is closed, or when the shard iterator reaches\n the record with the sequence number or other attribute that marks it as the last record\n to process.
Each data record can be up to 1 MiB in size, and each shard can read up to 2 MiB per\n second. You can ensure that your calls don't exceed the maximum supported size or\n throughput by using the Limit
parameter to specify the maximum number of\n records that GetRecords can return. Consider your average record size\n when determining this limit. The maximum number of records that can be returned per call\n is 10,000.
The size of the data returned by GetRecords varies depending on the\n utilization of the shard. It is recommended that consumer applications retrieve records\n via the GetRecords
command using the 5 TPS limit to remain caught up.\n Retrieving records less frequently can lead to consumer applications falling behind. The\n maximum size of data that GetRecords can return is 10 MiB. If a call\n returns this amount of data, subsequent calls made within the next 5 seconds throw\n ProvisionedThroughputExceededException
. If there is insufficient\n provisioned throughput on the stream, subsequent calls made within the next 1 second\n throw ProvisionedThroughputExceededException
. GetRecords\n doesn't return any data when it throws an exception. For this reason, we recommend that\n you wait 1 second between calls to GetRecords. However, it's possible\n that the application will get exceptions for longer than 1 second.
To detect whether the application is falling behind in processing, you can use the\n MillisBehindLatest
response attribute. You can also monitor the stream\n using CloudWatch metrics and other mechanisms (see Monitoring in the Amazon\n Kinesis Data Streams Developer Guide).
Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp
,\n that is set when a stream successfully receives and stores a record. This is commonly\n referred to as a server-side time stamp, whereas a client-side time stamp is set when a\n data producer creates or sends the record to a stream (a data producer is any data\n source putting data records into a stream, for example with PutRecords). The time stamp has millisecond precision. There are no guarantees about the time\n stamp accuracy, or that the time stamp is always increasing. For example, records in a\n shard or across a stream might have time stamps that are out of order.
This operation has a limit of five transactions per second per shard.
", - "smithy.rules#staticContextParams": { - "OperationType": { - "value": "data" - } - } + "smithy.api#documentation": "Gets data records from a Kinesis data stream's shard.
\nSpecify a shard iterator using the ShardIterator
parameter. The shard\n iterator specifies the position in the shard from which you want to start reading data\n records sequentially. If there are no records available in the portion of the shard that\n the iterator points to, GetRecords returns an empty list. It might\n take multiple calls to get to a portion of the shard that contains records.
You can scale by provisioning multiple shards per stream while considering service\n limits (for more information, see Amazon Kinesis Data Streams\n Limits in the Amazon Kinesis Data Streams Developer\n Guide). Your application should have one thread per shard, each reading\n continuously from its stream. To read from a stream continually, call GetRecords in a loop. Use GetShardIterator to get the\n shard iterator to specify in the first GetRecords call. GetRecords returns a new shard iterator in\n NextShardIterator
. Specify the shard iterator returned in\n NextShardIterator
in subsequent calls to GetRecords.\n If the shard has been closed, the shard iterator can't return more data and GetRecords returns null
in NextShardIterator
.\n You can terminate the loop when the shard is closed, or when the shard iterator reaches\n the record with the sequence number or other attribute that marks it as the last record\n to process.
Each data record can be up to 1 MiB in size, and each shard can read up to 2 MiB per\n second. You can ensure that your calls don't exceed the maximum supported size or\n throughput by using the Limit
parameter to specify the maximum number of\n records that GetRecords can return. Consider your average record size\n when determining this limit. The maximum number of records that can be returned per call\n is 10,000.
The size of the data returned by GetRecords varies depending on the\n utilization of the shard. It is recommended that consumer applications retrieve records\n via the GetRecords
command using the 5 TPS limit to remain caught up.\n Retrieving records less frequently can lead to consumer applications falling behind. The\n maximum size of data that GetRecords can return is 10 MiB. If a call\n returns this amount of data, subsequent calls made within the next 5 seconds throw\n ProvisionedThroughputExceededException
. If there is insufficient\n provisioned throughput on the stream, subsequent calls made within the next 1 second\n throw ProvisionedThroughputExceededException
. GetRecords\n doesn't return any data when it throws an exception. For this reason, we recommend that\n you wait 1 second between calls to GetRecords. However, it's possible\n that the application will get exceptions for longer than 1 second.
To detect whether the application is falling behind in processing, you can use the\n MillisBehindLatest
response attribute. You can also monitor the stream\n using CloudWatch metrics and other mechanisms (see Monitoring in the Amazon\n Kinesis Data Streams Developer Guide).
Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp
,\n that is set when a stream successfully receives and stores a record. This is commonly\n referred to as a server-side time stamp, whereas a client-side time stamp is set when a\n data producer creates or sends the record to a stream (a data producer is any data\n source putting data records into a stream, for example with PutRecords). The time stamp has millisecond precision. There are no guarantees about the time\n stamp accuracy, or that the time stamp is always increasing. For example, records in a\n shard or across a stream might have time stamps that are out of order.
This operation has a limit of five transactions per second per shard.
" } }, "com.amazonaws.kinesis#GetRecordsInput": { @@ -1123,15 +948,6 @@ "traits": { "smithy.api#documentation": "The maximum number of records to return. Specify a value of up to 10,000. If you\n specify a value that is greater than 10,000, GetRecords throws\n InvalidArgumentException
. The default value is 10,000.
The ARN of the stream.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } - } } }, "traits": { @@ -1189,9 +1005,6 @@ "target": "com.amazonaws.kinesis#GetShardIteratorOutput" }, "errors": [ - { - "target": "com.amazonaws.kinesis#AccessDeniedException" - }, { "target": "com.amazonaws.kinesis#InvalidArgumentException" }, @@ -1203,12 +1016,7 @@ } ], "traits": { - "smithy.api#documentation": "Gets an Amazon Kinesis shard iterator. A shard iterator expires 5 minutes after it is\n returned to the requester.
\nWhen invoking this API, it is recommended you use the StreamARN
input\n parameter rather than the StreamName
input parameter.
A shard iterator specifies the shard position from which to start reading data records\n sequentially. The position is specified using the sequence number of a data record in a\n shard. A sequence number is the identifier associated with every record ingested in the\n stream, and is assigned when a record is put into the stream. Each stream has one or\n more shards.
\nYou must specify the shard iterator type. For example, you can set the\n ShardIteratorType
parameter to read exactly from the position denoted\n by a specific sequence number by using the AT_SEQUENCE_NUMBER
shard\n iterator type. Alternatively, the parameter can read right after the sequence number by\n using the AFTER_SEQUENCE_NUMBER
shard iterator type, using sequence numbers\n returned by earlier calls to PutRecord, PutRecords,\n GetRecords, or DescribeStream. In the request,\n you can specify the shard iterator type AT_TIMESTAMP
to read records from\n an arbitrary point in time, TRIM_HORIZON
to cause\n ShardIterator
to point to the last untrimmed record in the shard in the\n system (the oldest data record in the shard), or LATEST
so that you always\n read the most recent data in the shard.
When you read repeatedly from a stream, use a GetShardIterator\n request to get the first shard iterator for use in your first GetRecords request and for subsequent reads use the shard iterator returned by the GetRecords request in NextShardIterator
. A new shard\n iterator is returned by every GetRecords request in\n NextShardIterator
, which you use in the ShardIterator
\n parameter of the next GetRecords request.
If a GetShardIterator request is made too often, you receive a\n ProvisionedThroughputExceededException
. For more information about\n throughput limits, see GetRecords, and Streams Limits in the\n Amazon Kinesis Data Streams Developer Guide.
If the shard is closed, GetShardIterator returns a valid iterator\n for the last sequence number of the shard. A shard can be closed as a result of using\n SplitShard or MergeShards.
\n\n GetShardIterator has a limit of five transactions per second per\n account per open shard.
", - "smithy.rules#staticContextParams": { - "OperationType": { - "value": "data" - } - } + "smithy.api#documentation": "Gets an Amazon Kinesis shard iterator. A shard iterator expires 5 minutes after it is\n returned to the requester.
\nA shard iterator specifies the shard position from which to start reading data records\n sequentially. The position is specified using the sequence number of a data record in a\n shard. A sequence number is the identifier associated with every record ingested in the\n stream, and is assigned when a record is put into the stream. Each stream has one or\n more shards.
\nYou must specify the shard iterator type. For example, you can set the\n ShardIteratorType
parameter to read exactly from the position denoted\n by a specific sequence number by using the AT_SEQUENCE_NUMBER
shard\n iterator type. Alternatively, the parameter can read right after the sequence number by\n using the AFTER_SEQUENCE_NUMBER
shard iterator type, using sequence numbers\n returned by earlier calls to PutRecord, PutRecords,\n GetRecords, or DescribeStream. In the request,\n you can specify the shard iterator type AT_TIMESTAMP
to read records from\n an arbitrary point in time, TRIM_HORIZON
to cause\n ShardIterator
to point to the last untrimmed record in the shard in the\n system (the oldest data record in the shard), or LATEST
so that you always\n read the most recent data in the shard.
When you read repeatedly from a stream, use a GetShardIterator\n request to get the first shard iterator for use in your first GetRecords request and for subsequent reads use the shard iterator returned by the GetRecords request in NextShardIterator
. A new shard\n iterator is returned by every GetRecords request in\n NextShardIterator
, which you use in the ShardIterator
\n parameter of the next GetRecords request.
If a GetShardIterator request is made too often, you receive a\n ProvisionedThroughputExceededException
. For more information about\n throughput limits, see GetRecords, and Streams Limits in the\n Amazon Kinesis Data Streams Developer Guide.
If the shard is closed, GetShardIterator returns a valid iterator\n for the last sequence number of the shard. A shard can be closed as a result of using\n SplitShard or MergeShards.
\n\n GetShardIterator has a limit of five transactions per second per\n account per open shard.
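As a companion to the iterator-type documentation above, the following boto3 sketch (illustration only; the stream name, shard ID, and sequence number are hypothetical placeholders) shows how the shard iterator types map onto GetShardIterator requests, including AT_TIMESTAMP with a millisecond-precision timestamp:

```python
# Requesting shard iterators with different ShardIteratorType values (boto3 sketch).
import datetime

import boto3

kinesis = boto3.client("kinesis")
stream = "example-stream"            # hypothetical stream name
shard = "shardId-000000000000"       # hypothetical shard ID

# Read from an arbitrary point in time (matched against ApproximateArrivalTimestamp).
at_time = kinesis.get_shard_iterator(
    StreamName=stream,
    ShardId=shard,
    ShardIteratorType="AT_TIMESTAMP",
    Timestamp=datetime.datetime(2016, 4, 4, 19, 58, 46, 480000,
                                tzinfo=datetime.timezone.utc),
)["ShardIterator"]

# Resume right after a sequence number returned by an earlier PutRecord/GetRecords call.
after_seq = kinesis.get_shard_iterator(
    StreamName=stream,
    ShardId=shard,
    ShardIteratorType="AFTER_SEQUENCE_NUMBER",
    StartingSequenceNumber="49590338271490256608559692538361571095921575989136588898",
)["ShardIterator"]

# Always read the most recent data in the shard.
latest = kinesis.get_shard_iterator(
    StreamName=stream, ShardId=shard, ShardIteratorType="LATEST"
)["ShardIterator"]
```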
" } }, "com.amazonaws.kinesis#GetShardIteratorInput": { @@ -1217,7 +1025,8 @@ "StreamName": { "target": "com.amazonaws.kinesis#StreamName", "traits": { - "smithy.api#documentation": "The name of the Amazon Kinesis data stream.
" + "smithy.api#documentation": "The name of the Amazon Kinesis data stream.
", + "smithy.api#required": {} } }, "ShardId": { @@ -1230,7 +1039,7 @@ "ShardIteratorType": { "target": "com.amazonaws.kinesis#ShardIteratorType", "traits": { - "smithy.api#documentation": "Determines how the shard iterator is used to start reading data records from the\n shard.
\nThe following are the valid Amazon Kinesis shard iterator types:
\nAT_SEQUENCE_NUMBER - Start reading from the position denoted by a specific\n sequence number, provided in the value\n StartingSequenceNumber
.
AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted by a\n specific sequence number, provided in the value\n StartingSequenceNumber
.
AT_TIMESTAMP - Start reading from the position denoted by a specific time\n stamp, provided in the value Timestamp
.
TRIM_HORIZON - Start reading at the last untrimmed record in the shard in the\n system, which is the oldest data record in the shard.
\nLATEST - Start reading just after the most recent record in the shard, so that\n you always read the most recent data in the shard.
\nDetermines how the shard iterator is used to start reading data records from the\n shard.
\nThe following are the valid Amazon Kinesis shard iterator types:
\nAT_SEQUENCE_NUMBER - Start reading from the position denoted by a specific\n sequence number, provided in the value\n StartingSequenceNumber
.
AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted by a\n specific sequence number, provided in the value\n StartingSequenceNumber
.
AT_TIMESTAMP - Start reading from the position denoted by a specific time\n stamp, provided in the value Timestamp
.
TRIM_HORIZON - Start reading at the last untrimmed record in the shard in the\n system, which is the oldest data record in the shard.
\nLATEST - Start reading just after the most recent record in the shard, so that\n you always read the most recent data in the shard.
\nThe time stamp of the data record from which to start reading. Used with shard\n iterator type AT_TIMESTAMP. A time stamp is the Unix epoch date with precision in\n milliseconds. For example, 2016-04-04T19:58:46.480-00:00
or\n 1459799926.480
. If a record with this exact time stamp does not exist,\n the iterator returned is for the next (later) record. If the time stamp is older than\n the current trim horizon, the iterator returned is for the oldest untrimmed data record\n (TRIM_HORIZON).
The ARN of the stream.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } - } } }, "traits": { @@ -1311,9 +1111,6 @@ "target": "smithy.api#Unit" }, "errors": [ - { - "target": "com.amazonaws.kinesis#AccessDeniedException" - }, { "target": "com.amazonaws.kinesis#InvalidArgumentException" }, @@ -1328,12 +1125,7 @@ } ], "traits": { - "smithy.api#documentation": "Increases the Kinesis data stream's retention period, which is the length of time data\n records are accessible after they are added to the stream. The maximum value of a\n stream's retention period is 8760 hours (365 days).
\nWhen invoking this API, it is recommended you use the StreamARN
input\n parameter rather than the StreamName
input parameter.
If you choose a longer stream retention period, this operation increases the time\n period during which records that have not yet expired are accessible. However, it does\n not make previous, expired data (older than the stream's previous retention period)\n accessible after the operation has been called. For example, if a stream's retention\n period is set to 24 hours and is increased to 168 hours, any data that is older than 24\n hours remains inaccessible to consumer applications.
", - "smithy.rules#staticContextParams": { - "OperationType": { - "value": "control" - } - } + "smithy.api#documentation": "Increases the Kinesis data stream's retention period, which is the length of time data\n records are accessible after they are added to the stream. The maximum value of a\n stream's retention period is 8760 hours (365 days).
\nIf you choose a longer stream retention period, this operation increases the time\n period during which records that have not yet expired are accessible. However, it does\n not make previous, expired data (older than the stream's previous retention period)\n accessible after the operation has been called. For example, if a stream's retention\n period is set to 24 hours and is increased to 168 hours, any data that is older than 24\n hours remains inaccessible to consumer applications.
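For example, extending a stream's retention from the default 24 hours to 168 hours (7 days) could look like the following boto3 sketch (illustration only; "example-stream" is a hypothetical name):

```python
# Extend retention to 168 hours; the new value must exceed the current retention period.
import boto3

kinesis = boto3.client("kinesis")
kinesis.increase_stream_retention_period(
    StreamName="example-stream",
    RetentionPeriodHours=168,
)

# Verify the new retention period via the stream summary.
summary = kinesis.describe_stream_summary(StreamName="example-stream")
print(summary["StreamDescriptionSummary"]["RetentionPeriodHours"])
```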
" } }, "com.amazonaws.kinesis#IncreaseStreamRetentionPeriodInput": { @@ -1342,7 +1134,8 @@ "StreamName": { "target": "com.amazonaws.kinesis#StreamName", "traits": { - "smithy.api#documentation": "The name of the stream to modify.
" + "smithy.api#documentation": "The name of the stream to modify.
", + "smithy.api#required": {} } }, "RetentionPeriodHours": { @@ -1351,15 +1144,6 @@ "smithy.api#documentation": "The new retention period of the stream, in hours. Must be more than the current\n retention period.
", "smithy.api#required": {} } - }, - "StreamARN": { - "target": "com.amazonaws.kinesis#StreamARN", - "traits": { - "smithy.api#documentation": "The ARN of the stream.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } - } } }, "traits": { @@ -1604,17 +1388,17 @@ "h2" ] }, - "smithy.api#documentation": "Amazon Kinesis Data Streams is a managed service that scales elastically for real-time\n processing of streaming big data.
", + "smithy.api#documentation": "Amazon Kinesis Data Streams is a managed service that scales elastically for real-time\n processing of streaming big data.
", "smithy.api#title": "Amazon Kinesis", "smithy.api#xmlNamespace": { "uri": "http://kinesis.amazonaws.com/doc/2013-12-02" }, "smithy.rules#endpointRuleSet": { - "version": "1.3", + "version": "1.0", "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -1637,21 +1421,6 @@ "required": false, "documentation": "Override the endpoint used to send this request", "type": "String" - }, - "StreamARN": { - "required": false, - "documentation": "The ARN of the Kinesis stream", - "type": "String" - }, - "OperationType": { - "required": false, - "documentation": "Internal parameter to distinguish between Control/Data plane API and accordingly generate control/data plane endpoint", - "type": "String" - }, - "ConsumerARN": { - "required": false, - "documentation": "The ARN of the Kinesis consumer", - "type": "String" } }, "rules": [ @@ -1675,1115 +1444,66 @@ "fn": "isSet", "argv": [ { - "ref": "StreamARN" + "ref": "Endpoint" } ] }, { - "fn": "not", + "fn": "parseURL", "argv": [ { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] + "ref": "Endpoint" } - ] - }, + ], + "assign": "url" + } + ], + "type": "tree", + "rules": [ { - "fn": "not", - "argv": [ + "conditions": [ { - "fn": "stringEquals", + "fn": "booleanEquals", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] + "ref": "UseFIPS" }, - "aws-iso" + true ] } - ] + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" }, { - "fn": "not", - "argv": [ + "conditions": [], + "type": "tree", + "rules": [ { - "fn": "stringEquals", - "argv": [ + "conditions": [ { - "fn": "getAttr", + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "ref": "UseDualStack" }, - "name" + true ] - }, - "aws-iso-b" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "aws.parseArn", - "argv": [ - { - "ref": "StreamARN" } ], - "assign": "arn" - } - ], - "type": "tree", - "rules": [ + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, { "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "arn" - }, - "accountId" - ] - }, - false - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "arn" - }, - "region" - ] - }, - false - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "arn" - }, - "service" - ] - }, - "kinesis" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "arn" - }, - "resourceId[0]" - ], - "assign": "arnType" - }, - { - "fn": "not", - "argv": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "arnType" - }, - "" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "arnType" - }, - "stream" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - 
"rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "{arn#partition}" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "OperationType" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://{arn#accountId}.{OperationType}-kinesis-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled, but this partition does not support DualStack.", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled, but this partition does not support FIPS.", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://{arn#accountId}.{OperationType}-kinesis-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://{arn#accountId}.{OperationType}-kinesis.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://{arn#accountId}.{OperationType}-kinesis.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Operation Type is not set. 
Please contact service team for resolution.", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Partition: {arn#partition} from ARN doesn't match with partition name: {PartitionResult#name}.", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: Kinesis ARNs don't support `{arnType}` arn types.", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: No ARN type specified", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: The ARN was not for the Kinesis service, found: {arn#service}.", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: Invalid region.", - "type": "error" - } - ] - } - ] + "endpoint": { + "url": { + "ref": "Endpoint" }, - { - "conditions": [], - "error": "Invalid ARN: Invalid account id.", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: Failed to parse ARN.", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "ConsumerARN" - } - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - } - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws-iso" - ] - } - ] - }, - { - "fn": "not", - "argv": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "aws-iso-b" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "aws.parseArn", - "argv": [ - { - "ref": "ConsumerARN" - } - ], - "assign": "arn" - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "arn" - }, - "accountId" - ] - }, - false - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isValidHostLabel", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "arn" - }, - "region" - ] - }, - false - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "arn" - }, - "service" - ] - }, - "kinesis" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "arn" - }, - "resourceId[0]" - ], - "assign": "arnType" - }, - { - "fn": "not", - "argv": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "arnType" - }, - "" - ] - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "arnType" - }, - "stream" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "name" - ] - }, - "{arn#partition}" - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "OperationType" - } - ] - } - ], - "type": "tree", - 
"rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://{arn#accountId}.{OperationType}-kinesis-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled, but this partition does not support DualStack.", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled, but this partition does not support FIPS.", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://{arn#accountId}.{OperationType}-kinesis-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - }, - true - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://{arn#accountId}.{OperationType}-kinesis.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ] - }, - { - "conditions": [], - "endpoint": { - "url": "https://{arn#accountId}.{OperationType}-kinesis.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Operation Type is not set. 
Please contact service team for resolution.", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Partition: {arn#partition} from ARN doesn't match with partition name: {PartitionResult#name}.", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: Kinesis ARNs don't support `{arnType}` arn types.", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: No ARN type specified", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: The ARN was not for the Kinesis service, found: {arn#service}.", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: Invalid region.", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: Invalid account id.", - "type": "error" - } - ] - } - ] - }, - { - "conditions": [], - "error": "Invalid ARN: Failed to parse ARN.", - "type": "error" - } - ] - }, - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - } - ], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "type": "tree", - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ] } @@ -2999,8 +1719,8 @@ }, "params": { "Region": "ap-south-2", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -3012,8 +1732,8 @@ }, "params": { "Region": "ap-south-2", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -3025,8 +1745,8 @@ }, "params": { "Region": "ap-south-2", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -3038,8 +1758,8 @@ }, "params": { "Region": "ap-south-2", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -3051,8 +1771,8 @@ }, "params": { "Region": "ap-south-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -3064,8 +1784,8 @@ }, "params": { "Region": "ap-south-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -3077,8 +1797,8 @@ }, "params": { "Region": "ap-south-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -3090,8 +1810,8 @@ }, "params": { "Region": "ap-south-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -3103,8 +1823,8 @@ }, "params": { "Region": "eu-south-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -3116,8 +1836,8 @@ }, "params": { "Region": "eu-south-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -3129,8 +1849,8 @@ }, "params": { "Region": "eu-south-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -3142,8 +1862,8 @@ }, "params": { "Region": 
"eu-south-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -3155,8 +1875,8 @@ }, "params": { "Region": "eu-south-2", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -3168,8 +1888,8 @@ }, "params": { "Region": "eu-south-2", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -3181,8 +1901,8 @@ }, "params": { "Region": "eu-south-2", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -3194,8 +1914,8 @@ }, "params": { "Region": "eu-south-2", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -3207,8 +1927,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -3220,8 +1940,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -3233,8 +1953,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -3246,8 +1966,8 @@ }, "params": { "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -3259,8 +1979,8 @@ }, "params": { "Region": "me-central-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -3272,8 +1992,8 @@ }, "params": { "Region": "me-central-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -3285,8 +2005,8 @@ }, "params": { "Region": "me-central-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -3298,8 +2018,8 @@ }, "params": { "Region": "me-central-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -3311,8 +2031,8 @@ }, "params": { "Region": "ca-central-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -3324,8 +2044,8 @@ }, "params": { "Region": "ca-central-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -3337,8 +2057,8 @@ }, "params": { "Region": "ca-central-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -3350,8 +2070,8 @@ }, "params": { "Region": "ca-central-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -3363,8 +2083,8 @@ }, "params": { "Region": "eu-central-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -3376,8 +2096,8 @@ }, "params": { "Region": "eu-central-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -3389,8 +2109,8 @@ }, "params": { "Region": "eu-central-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -3402,8 +2122,8 @@ }, "params": { "Region": "eu-central-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -3413,8 +2133,8 @@ }, "params": { "Region": "us-iso-west-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -3426,8 +2146,8 @@ }, "params": { "Region": "us-iso-west-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -3437,8 
+2157,8 @@ }, "params": { "Region": "us-iso-west-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -3450,8 +2170,8 @@ }, "params": { "Region": "us-iso-west-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -3463,8 +2183,8 @@ }, "params": { "Region": "eu-central-2", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -3476,8 +2196,8 @@ }, "params": { "Region": "eu-central-2", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -3489,8 +2209,8 @@ }, "params": { "Region": "eu-central-2", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -3502,8 +2222,8 @@ }, "params": { "Region": "eu-central-2", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -3515,8 +2235,8 @@ }, "params": { "Region": "us-west-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -3528,8 +2248,8 @@ }, "params": { "Region": "us-west-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -3541,8 +2261,8 @@ }, "params": { "Region": "us-west-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -3554,8 +2274,8 @@ }, "params": { "Region": "us-west-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -3567,8 +2287,8 @@ }, "params": { "Region": "us-west-2", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -3580,8 +2300,8 @@ }, "params": { "Region": "us-west-2", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -3593,8 +2313,8 @@ }, "params": { "Region": "us-west-2", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -3606,8 +2326,8 @@ }, "params": { "Region": "us-west-2", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -3619,8 +2339,8 @@ }, "params": { "Region": "af-south-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -3632,8 +2352,8 @@ }, "params": { "Region": "af-south-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -3645,8 +2365,8 @@ }, "params": { "Region": "af-south-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -3658,8 +2378,8 @@ }, "params": { "Region": "af-south-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -3671,8 +2391,8 @@ }, "params": { "Region": "eu-north-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -3684,8 +2404,8 @@ }, "params": { "Region": "eu-north-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -3697,8 +2417,8 @@ }, "params": { "Region": "eu-north-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -3710,8 +2430,8 @@ }, "params": { "Region": "eu-north-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -3723,8 +2443,8 @@ }, "params": { "Region": "eu-west-3", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -3736,8 +2456,8 
@@ }, "params": { "Region": "eu-west-3", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -3749,8 +2469,8 @@ }, "params": { "Region": "eu-west-3", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -3762,8 +2482,8 @@ }, "params": { "Region": "eu-west-3", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -3775,8 +2495,8 @@ }, "params": { "Region": "eu-west-2", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -3788,8 +2508,8 @@ }, "params": { "Region": "eu-west-2", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -3801,8 +2521,8 @@ }, "params": { "Region": "eu-west-2", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -3814,8 +2534,8 @@ }, "params": { "Region": "eu-west-2", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -3827,8 +2547,8 @@ }, "params": { "Region": "eu-west-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -3840,8 +2560,8 @@ }, "params": { "Region": "eu-west-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -3853,8 +2573,8 @@ }, "params": { "Region": "eu-west-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -3866,8 +2586,8 @@ }, "params": { "Region": "eu-west-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -3879,8 +2599,8 @@ }, "params": { "Region": "ap-northeast-3", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -3892,8 +2612,8 @@ }, "params": { "Region": "ap-northeast-3", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -3905,8 +2625,8 @@ }, "params": { "Region": "ap-northeast-3", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -3918,8 +2638,8 @@ }, "params": { "Region": "ap-northeast-3", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -3931,8 +2651,8 @@ }, "params": { "Region": "ap-northeast-2", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -3944,8 +2664,8 @@ }, "params": { "Region": "ap-northeast-2", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -3957,8 +2677,8 @@ }, "params": { "Region": "ap-northeast-2", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -3970,8 +2690,8 @@ }, "params": { "Region": "ap-northeast-2", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -3983,8 +2703,8 @@ }, "params": { "Region": "ap-northeast-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -3996,8 +2716,8 @@ }, "params": { "Region": "ap-northeast-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -4009,8 +2729,8 @@ }, "params": { "Region": "ap-northeast-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -4022,8 +2742,8 @@ }, "params": { "Region": "ap-northeast-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false 
} }, { @@ -4035,8 +2755,8 @@ }, "params": { "Region": "me-south-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -4048,8 +2768,8 @@ }, "params": { "Region": "me-south-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -4061,8 +2781,8 @@ }, "params": { "Region": "me-south-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -4074,8 +2794,8 @@ }, "params": { "Region": "me-south-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -4087,8 +2807,8 @@ }, "params": { "Region": "sa-east-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -4100,8 +2820,8 @@ }, "params": { "Region": "sa-east-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -4113,8 +2833,8 @@ }, "params": { "Region": "sa-east-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -4126,8 +2846,8 @@ }, "params": { "Region": "sa-east-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -4139,8 +2859,8 @@ }, "params": { "Region": "ap-east-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -4152,8 +2872,8 @@ }, "params": { "Region": "ap-east-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -4165,8 +2885,8 @@ }, "params": { "Region": "ap-east-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -4178,8 +2898,8 @@ }, "params": { "Region": "ap-east-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -4191,8 +2911,8 @@ }, "params": { "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -4204,8 +2924,8 @@ }, "params": { "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -4217,8 +2937,8 @@ }, "params": { "Region": "cn-north-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -4230,8 +2950,8 @@ }, "params": { "Region": "cn-north-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -4243,8 +2963,8 @@ }, "params": { "Region": "ca-west-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -4256,8 +2976,8 @@ }, "params": { "Region": "ca-west-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -4269,8 +2989,8 @@ }, "params": { "Region": "ca-west-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -4282,8 +3002,8 @@ }, "params": { "Region": "ca-west-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -4295,8 +3015,8 @@ }, "params": { "Region": "us-gov-west-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -4308,8 +3028,8 @@ }, "params": { "Region": "us-gov-west-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -4321,8 +3041,8 @@ }, "params": { "Region": "us-gov-west-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -4334,8 
+3054,8 @@ }, "params": { "Region": "us-gov-west-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -4347,8 +3067,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -4360,8 +3080,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -4373,8 +3093,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -4386,8 +3106,8 @@ }, "params": { "Region": "ap-southeast-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -4399,8 +3119,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -4412,8 +3132,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -4425,8 +3145,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -4438,8 +3158,8 @@ }, "params": { "Region": "ap-southeast-2", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -4449,8 +3169,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -4462,8 +3182,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -4473,8 +3193,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -4486,8 +3206,8 @@ }, "params": { "Region": "us-iso-east-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -4499,8 +3219,8 @@ }, "params": { "Region": "ap-southeast-3", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -4512,8 +3232,8 @@ }, "params": { "Region": "ap-southeast-3", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -4525,8 +3245,8 @@ }, "params": { "Region": "ap-southeast-3", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -4538,8 +3258,8 @@ }, "params": { "Region": "ap-southeast-3", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -4551,8 +3271,8 @@ }, "params": { "Region": "ap-southeast-4", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -4564,8 +3284,8 @@ }, "params": { "Region": "ap-southeast-4", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -4577,8 +3297,8 @@ }, "params": { "Region": "ap-southeast-4", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -4590,8 +3310,8 @@ }, "params": { "Region": "ap-southeast-4", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -4603,8 +3323,8 @@ }, "params": { "Region": "ap-southeast-5", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -4616,8 +3336,8 @@ }, "params": { "Region": "ap-southeast-5", - "UseFIPS": true, - 
"UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -4629,8 +3349,8 @@ }, "params": { "Region": "ap-southeast-5", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -4642,8 +3362,8 @@ }, "params": { "Region": "ap-southeast-5", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -4655,8 +3375,8 @@ }, "params": { "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -4668,8 +3388,8 @@ }, "params": { "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -4681,8 +3401,8 @@ }, "params": { "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -4694,8 +3414,8 @@ }, "params": { "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -4707,8 +3427,8 @@ }, "params": { "Region": "us-east-2", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -4720,8 +3440,8 @@ }, "params": { "Region": "us-east-2", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -4733,8 +3453,8 @@ }, "params": { "Region": "us-east-2", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -4746,8 +3466,8 @@ }, "params": { "Region": "us-east-2", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -4759,8 +3479,8 @@ }, "params": { "Region": "cn-northwest-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -4772,8 +3492,8 @@ }, "params": { "Region": "cn-northwest-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -4785,8 +3505,8 @@ }, "params": { "Region": "cn-northwest-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -4798,8 +3518,8 @@ }, "params": { "Region": "cn-northwest-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -4809,8 +3529,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -4822,8 +3542,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseFIPS": true, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -4833,8 +3553,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -4846,8 +3566,8 @@ }, "params": { "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -4859,8 +3579,8 @@ }, "params": { "Region": "us-east-1", - "UseFIPS": false, "UseDualStack": false, + "UseFIPS": false, "Endpoint": "https://example.com" } }, @@ -4871,8 +3591,8 @@ }, "params": { "Region": "us-east-1", - "UseFIPS": true, "UseDualStack": false, + "UseFIPS": true, "Endpoint": "https://example.com" } }, @@ -4883,792 +3603,9 @@ }, "params": { "Region": "us-east-1", - "UseFIPS": false, "UseDualStack": true, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "Invalid ARN: Failed to parse ARN.", - "expect": { - "error": "Invalid ARN: Failed to parse ARN." 
- }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "StreamARN": "arn" - } - }, - { - "documentation": "Invalid ARN: partition missing from ARN.", - "expect": { - "error": "Invalid ARN: Failed to parse ARN." - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "StreamARN": "arn::kinesis:us-west-2:123456789012:stream/testStream" - } - }, - { - "documentation": "Invalid ARN: partitions mismatch.", - "expect": { - "error": "Partition: aws from ARN doesn't match with partition name: aws-us-gov." - }, - "params": { - "Region": "us-gov-west-1", - "UseFIPS": false, - "UseDualStack": false, - "StreamARN": "arn:aws:kinesis:us-west-2:123456789012:stream/testStream" - } - }, - { - "documentation": "Invalid ARN: Not Kinesis", - "expect": { - "error": "Invalid ARN: The ARN was not for the Kinesis service, found: s3." - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "StreamARN": "arn:aws:s3:us-west-2:123456789012:stream/testStream" - } - }, - { - "documentation": "Invalid ARN: Region is missing in ARN", - "expect": { - "error": "Invalid ARN: Invalid region." - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "StreamARN": "arn:aws:kinesis::123456789012:stream/testStream" - } - }, - { - "documentation": "Invalid ARN: Region is empty string in ARN", - "expect": { - "error": "Invalid ARN: Invalid region." - }, - "params": { - "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false, - "StreamARN": "arn:aws:kinesis: :123456789012:stream/testStream" - } - }, - { - "documentation": "Invalid ARN: Invalid account id", - "expect": { - "error": "Invalid ARN: Invalid account id." - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "StreamARN": "arn:aws:kinesis:us-east-1::stream/testStream", - "OperationType": "control" - } - }, - { - "documentation": "Invalid ARN: Invalid account id", - "expect": { - "error": "Invalid ARN: Invalid account id." - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "StreamARN": "arn:aws:kinesis:us-east-1: :stream/testStream", - "OperationType": "control" - } - }, - { - "documentation": "Invalid ARN: Kinesis ARNs only support stream arn types", - "expect": { - "error": "Invalid ARN: Kinesis ARNs don't support `accesspoint` arn types." - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "StreamARN": "arn:aws:kinesis:us-east-1:123:accesspoint/testStream" - } - }, - { - "documentation": "Dual Stack not supported region.", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "Region": "us-iso-west-1", - "UseFIPS": true, - "UseDualStack": true, - "StreamARN": "arn:aws-iso:kinesis:us-iso-west-1:123456789012:stream/testStream", - "OperationType": "control" - } - }, - { - "documentation": "OperationType not set", - "expect": { - "error": "Operation Type is not set. Please contact service team for resolution." 
- }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "StreamARN": "arn:aws:kinesis:us-east-1:123456789012:stream/testStream" - } - }, - { - "documentation": "Custom Endpoint is specified", - "expect": { - "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "OperationType": "control", - "StreamARN": "arn:aws:kinesis:us-east-1:123:stream/test-stream", - "Endpoint": "https://example.com" - } - }, - { - "documentation": "Account endpoint targeting control operation type", - "expect": { - "endpoint": { - "url": "https://123.control-kinesis.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "OperationType": "control", - "StreamARN": "arn:aws:kinesis:us-east-1:123:stream/test-stream" - } - }, - { - "documentation": "Account endpoint targeting data operation type", - "expect": { - "endpoint": { - "url": "https://123.data-kinesis.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "OperationType": "data", - "StreamARN": "arn:aws:kinesis:us-east-1:123:stream/test-stream" - } - }, - { - "documentation": "Account endpoint with fips targeting data operation type", - "expect": { - "endpoint": { - "url": "https://123.data-kinesis-fips.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false, - "OperationType": "data", - "StreamARN": "arn:aws:kinesis:us-east-1:123:stream/test-stream" - } - }, - { - "documentation": "Account endpoint with fips targeting control operation type", - "expect": { - "endpoint": { - "url": "https://123.control-kinesis-fips.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false, - "OperationType": "control", - "StreamARN": "arn:aws:kinesis:us-east-1:123:stream/test-stream" - } - }, - { - "documentation": "Account endpoint with Dual Stack and FIPS enabled", - "expect": { - "endpoint": { - "url": "https://123.control-kinesis-fips.us-east-1.api.aws" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": true, - "OperationType": "control", - "StreamARN": "arn:aws:kinesis:us-east-1:123:stream/test-stream" - } - }, - { - "documentation": "Account endpoint with Dual Stack enabled", - "expect": { - "endpoint": { - "url": "https://123.data-kinesis.us-west-1.api.aws" - } - }, - "params": { - "Region": "us-west-1", - "UseFIPS": false, - "UseDualStack": true, - "OperationType": "data", - "StreamARN": "arn:aws:kinesis:us-west-1:123:stream/test-stream" - } - }, - { - "documentation": "Account endpoint with FIPS and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://123.control-kinesis.us-west-1.amazonaws.com" - } - }, - "params": { - "Region": "us-west-1", - "UseFIPS": false, - "UseDualStack": false, - "OperationType": "control", - "StreamARN": "arn:aws:kinesis:us-west-1:123:stream/test-stream" - } - }, - { - "documentation": "RegionMismatch: client region should be used for endpoint region", - "expect": { - "endpoint": { - "url": "https://123.data-kinesis.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "OperationType": "data", - "StreamARN": "arn:aws:kinesis:us-west-1:123:stream/testStream" - } - }, - { - "documentation": "Account endpoint with FIPS enabled", - "expect": { - "endpoint": { - "url": 
"https://123.data-kinesis-fips.cn-northwest-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-northwest-1", - "UseFIPS": true, - "UseDualStack": false, - "OperationType": "data", - "StreamARN": "arn:aws-cn:kinesis:cn-northwest-1:123:stream/test-stream" - } - }, - { - "documentation": "Account endpoint with FIPS and DualStack enabled for cn regions.", - "expect": { - "endpoint": { - "url": "https://123.data-kinesis-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-northwest-1", - "UseFIPS": true, - "UseDualStack": true, - "OperationType": "data", - "StreamARN": "arn:aws-cn:kinesis:cn-northwest-1:123:stream/test-stream" - } - }, - { - "documentation": "Account endpoint targeting control operation type in ADC regions", - "expect": { - "endpoint": { - "url": "https://kinesis.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": false, - "UseDualStack": false, - "OperationType": "control", - "StreamARN": "arn:aws-iso:kinesis:us-iso-east-1:123:stream/test-stream" - } - }, - { - "documentation": "Account endpoint targeting control operation type in ADC regions", - "expect": { - "endpoint": { - "url": "https://kinesis.us-iso-west-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-west-1", - "UseFIPS": false, - "UseDualStack": false, - "OperationType": "control", - "StreamARN": "arn:aws-iso:kinesis:us-iso-west-1:123:stream/test-stream" - } - }, - { - "documentation": "Account endpoint targeting data operation type in ADC regions", - "expect": { - "endpoint": { - "url": "https://kinesis.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": false, - "OperationType": "data", - "StreamARN": "arn:aws-iso-b:kinesis:us-isob-east-1:123:stream/test-stream" - } - }, - { - "documentation": "Account endpoint with fips targeting control operation type in ADC regions", - "expect": { - "endpoint": { - "url": "https://kinesis-fips.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": false, - "OperationType": "control", - "StreamARN": "arn:aws-iso:kinesis:us-iso-east-1:123:stream/test-stream" - } - }, - { - "documentation": "Account endpoint with fips targeting data operation type in ADC regions", - "expect": { - "endpoint": { - "url": "https://kinesis-fips.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": true, - "UseDualStack": false, - "OperationType": "data", - "StreamARN": "arn:aws-iso-b:kinesis:us-isob-east-1:123:stream/test-stream" - } - }, - { - "documentation": "Invalid ConsumerARN: Failed to parse ARN.", - "expect": { - "error": "Invalid ARN: Failed to parse ARN." - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "ConsumerARN": "arn" - } - }, - { - "documentation": "Invalid ConsumerARN: partition missing from ARN.", - "expect": { - "error": "Invalid ARN: Failed to parse ARN." - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "ConsumerARN": "arn::kinesis:us-west-2:123456789012:stream/testStream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "Invalid ARN: partitions mismatch.", - "expect": { - "error": "Partition: aws from ARN doesn't match with partition name: aws-us-gov." 
- }, - "params": { - "Region": "us-gov-west-1", - "UseFIPS": false, - "UseDualStack": false, - "ConsumerARN": "arn:aws:kinesis:us-west-2:123456789012:stream/testStream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "Invalid ARN: Not Kinesis", - "expect": { - "error": "Invalid ARN: The ARN was not for the Kinesis service, found: s3." - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "ConsumerARN": "arn:aws:s3:us-west-2:123456789012:stream/testStream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "Invalid ARN: Region is missing in ARN", - "expect": { - "error": "Invalid ARN: Invalid region." - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "ConsumerARN": "arn:aws:kinesis::123456789012:stream/testStream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "Invalid ARN: Region is empty string in ARN", - "expect": { - "error": "Invalid ARN: Invalid region." - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "ConsumerARN": "arn:aws:kinesis: :123456789012:stream/testStream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "Invalid ARN: Invalid account id", - "expect": { - "error": "Invalid ARN: Invalid account id." - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "ConsumerARN": "arn:aws:kinesis:us-east-1::stream/testStream/consumer/test-consumer:1525898737", - "OperationType": "control" - } - }, - { - "documentation": "Invalid ARN: Invalid account id", - "expect": { - "error": "Invalid ARN: Invalid account id." - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "ConsumerARN": "arn:aws:kinesis:us-east-1: :stream/testStream/consumer/test-consumer:1525898737", - "OperationType": "control" - } - }, - { - "documentation": "Invalid ARN: Kinesis ARNs only support stream arn/consumer arn types", - "expect": { - "error": "Invalid ARN: Kinesis ARNs don't support `accesspoint` arn types." - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "ConsumerARN": "arn:aws:kinesis:us-east-1:123:accesspoint/testStream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "Dual Stack not supported region.", - "expect": { - "error": "FIPS and DualStack are enabled, but this partition does not support one or both" - }, - "params": { - "Region": "us-iso-west-1", - "UseFIPS": true, - "UseDualStack": true, - "ConsumerARN": "arn:aws-iso:kinesis:us-iso-west-1:123456789012:stream/testStream/consumer/test-consumer:1525898737", - "OperationType": "control" - } - }, - { - "documentation": "OperationType not set", - "expect": { - "error": "Operation Type is not set. Please contact service team for resolution." 
- }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "ConsumerARN": "arn:aws:kinesis:us-east-1:123456789012:stream/testStream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "Custom Endpoint is specified", - "expect": { - "endpoint": { - "url": "https://example.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "OperationType": "control", - "ConsumerARN": "arn:aws:kinesis:us-east-1:123:stream/test-stream/consumer/test-consumer:1525898737", - "Endpoint": "https://example.com" - } - }, - { - "documentation": "Account endpoint targeting control operation type", - "expect": { - "endpoint": { - "url": "https://123.control-kinesis.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "OperationType": "control", - "ConsumerARN": "arn:aws:kinesis:us-east-1:123:stream/test-stream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "Account endpoint targeting data operation type", - "expect": { - "endpoint": { - "url": "https://123.data-kinesis.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "OperationType": "data", - "ConsumerARN": "arn:aws:kinesis:us-east-1:123:stream/test-stream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "Account endpoint with fips targeting data operation type", - "expect": { - "endpoint": { - "url": "https://123.data-kinesis-fips.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false, - "OperationType": "data", - "ConsumerARN": "arn:aws:kinesis:us-east-1:123:stream/test-stream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "Account endpoint with fips targeting control operation type", - "expect": { - "endpoint": { - "url": "https://123.control-kinesis-fips.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false, - "OperationType": "control", - "ConsumerARN": "arn:aws:kinesis:us-east-1:123:stream/test-stream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "Account endpoint with Dual Stack and FIPS enabled", - "expect": { - "endpoint": { - "url": "https://123.control-kinesis-fips.us-east-1.api.aws" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": true, - "OperationType": "control", - "ConsumerARN": "arn:aws:kinesis:us-east-1:123:stream/test-stream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "Account endpoint with Dual Stack enabled", - "expect": { - "endpoint": { - "url": "https://123.data-kinesis.us-west-1.api.aws" - } - }, - "params": { - "Region": "us-west-1", - "UseFIPS": false, - "UseDualStack": true, - "OperationType": "data", - "ConsumerARN": "arn:aws:kinesis:us-west-1:123:stream/test-stream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "Account endpoint with FIPS and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://123.control-kinesis.us-west-1.amazonaws.com" - } - }, - "params": { - "Region": "us-west-1", - "UseFIPS": false, - "UseDualStack": false, - "OperationType": "control", - "ConsumerARN": "arn:aws:kinesis:us-west-1:123:stream/test-stream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "RegionMismatch: client region should be used for endpoint region", - "expect": { - "endpoint": { - "url": 
"https://123.data-kinesis.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "OperationType": "data", - "ConsumerARN": "arn:aws:kinesis:us-west-1:123:stream/testStream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "Account endpoint with FIPS enabled", - "expect": { - "endpoint": { - "url": "https://123.data-kinesis-fips.cn-northwest-1.amazonaws.com.cn" - } - }, - "params": { - "Region": "cn-northwest-1", - "UseFIPS": true, - "UseDualStack": false, - "OperationType": "data", - "ConsumerARN": "arn:aws-cn:kinesis:cn-northwest-1:123:stream/test-stream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "Account endpoint with FIPS and DualStack enabled for cn regions.", - "expect": { - "endpoint": { - "url": "https://123.data-kinesis-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-northwest-1", - "UseFIPS": true, - "UseDualStack": true, - "OperationType": "data", - "ConsumerARN": "arn:aws-cn:kinesis:cn-northwest-1:123:stream/test-stream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "Account endpoint targeting control operation type in ADC regions", - "expect": { - "endpoint": { - "url": "https://kinesis.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": false, - "UseDualStack": false, - "OperationType": "control", - "ConsumerARN": "arn:aws-iso:kinesis:us-iso-east-1:123:stream/test-stream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "Account endpoint targeting control operation type in ADC regions", - "expect": { - "endpoint": { - "url": "https://kinesis.us-iso-west-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-west-1", - "UseFIPS": false, - "UseDualStack": false, - "OperationType": "control", - "ConsumerARN": "arn:aws-iso:kinesis:us-iso-west-1:123:stream/test-stream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "Account endpoint targeting data operation type in ADC regions", - "expect": { - "endpoint": { - "url": "https://kinesis.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, - "UseDualStack": false, - "OperationType": "data", - "ConsumerARN": "arn:aws-iso-b:kinesis:us-isob-east-1:123:stream/test-stream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "Account endpoint with fips targeting control operation type in ADC regions", - "expect": { - "endpoint": { - "url": "https://kinesis-fips.us-iso-east-1.c2s.ic.gov" - } - }, - "params": { - "Region": "us-iso-east-1", - "UseFIPS": true, - "UseDualStack": false, - "OperationType": "control", - "ConsumerARN": "arn:aws-iso:kinesis:us-iso-east-1:123:stream/test-stream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "Account endpoint with fips targeting data operation type in ADC regions", - "expect": { - "endpoint": { - "url": "https://kinesis-fips.us-isob-east-1.sc2s.sgov.gov" - } - }, - "params": { - "Region": "us-isob-east-1", - "UseFIPS": true, - "UseDualStack": false, - "OperationType": "data", - "ConsumerARN": "arn:aws-iso-b:kinesis:us-isob-east-1:123:stream/test-stream/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "ConsumerARN targeting US-EAST-1", - "expect": { - "endpoint": { - "url": "https://123456789123.data-kinesis.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "OperationType": "data", - 
"ConsumerARN": "arn:aws:kinesis:us-east-1:123456789123:stream/foobar/consumer/test-consumer:1525898737" - } - }, - { - "documentation": "Both StreamARN and ConsumerARN specified. StreamARN should take precedence", - "expect": { - "endpoint": { - "url": "https://123.data-kinesis.us-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false, - "OperationType": "data", - "StreamARN": "arn:aws:kinesis:us-east-1:123:stream/foobar", - "ConsumerARN": "arn:aws:kinesis:us-east-1:123456789123:stream/foobar/consumer/test-consumer:1525898737" + "Endpoint": "https://example.com" } } ], @@ -5700,9 +3637,6 @@ "target": "com.amazonaws.kinesis#ListShardsOutput" }, "errors": [ - { - "target": "com.amazonaws.kinesis#AccessDeniedException" - }, { "target": "com.amazonaws.kinesis#ExpiredNextTokenException" }, @@ -5720,12 +3654,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists the shards in a stream and provides information about each shard. This operation\n has a limit of 1000 transactions per second per data stream.
\nWhen invoking this API, it is recommended you use the StreamARN
input\n parameter rather than the StreamName
input parameter.
This action does not list expired shards. For information about expired shards, see\n Data Routing, Data Persistence, and Shard State after a Reshard.
\nThis API is a new operation that is used by the Amazon Kinesis Client Library\n (KCL). If you have a fine-grained IAM policy that only allows specific operations,\n you must update your policy to allow calls to this API. For more information, see\n Controlling Access to Amazon Kinesis Data Streams Resources Using\n IAM.
\nLists the shards in a stream and provides information about each shard. This operation\n has a limit of 1000 transactions per second per data stream.
\nThis action does not list expired shards. For information about expired shards, see\n Data Routing, Data Persistence, and Shard State after a Reshard.
\nThis API is a new operation that is used by the Amazon Kinesis Client Library\n (KCL). If you have a fine-grained IAM policy that only allows specific operations,\n you must update your policy to allow calls to this API. For more information, see\n Controlling Access to Amazon Kinesis Data Streams Resources Using\n IAM.
\nThe name of the data stream whose shards you want to list.
\nYou cannot specify this parameter if you specify the NextToken
\n parameter.
The name of the data stream whose shards you want to list.
\nYou cannot specify this parameter if you specify the NextToken
\n parameter.
When the number of shards in the data stream is greater than the default value for the\n MaxResults
parameter, or if you explicitly specify a value for\n MaxResults
that is less than the number of shards in the data stream,\n the response includes a pagination token named NextToken
. You can specify\n this NextToken
value in a subsequent call to ListShards
to\n list the next set of shards.
Don't specify StreamName
or StreamCreationTimestamp
if you\n specify NextToken
because the latter unambiguously identifies the\n stream.
You can optionally specify a value for the MaxResults
parameter when you\n specify NextToken
. If you specify a MaxResults
value that is\n less than the number of shards that the operation returns if you don't specify\n MaxResults
, the response will contain a new NextToken
\n value. You can use the new NextToken
value in a subsequent call to the\n ListShards
operation.
Tokens expire after 300 seconds. When you obtain a value for\n NextToken
in the response to a call to ListShards
, you\n have 300 seconds to use that value. If you specify an expired token in a call to\n ListShards
, you get ExpiredNextTokenException
.
When the number of shards in the data stream is greater than the default value for the\n MaxResults
parameter, or if you explicitly specify a value for\n MaxResults
that is less than the number of shards in the data stream,\n the response includes a pagination token named NextToken
. You can specify\n this NextToken
value in a subsequent call to ListShards
to\n list the next set of shards.
Don't specify StreamName
or StreamCreationTimestamp
if you\n specify NextToken
because the latter unambiguously identifies the\n stream.
You can optionally specify a value for the MaxResults
parameter when you\n specify NextToken
. If you specify a MaxResults
value that is\n less than the number of shards that the operation returns if you don't specify\n MaxResults
, the response will contain a new NextToken
\n value. You can use the new NextToken
value in a subsequent call to the\n ListShards
operation.
Tokens expire after 300 seconds. When you obtain a value for\n NextToken
in the response to a call to ListShards
, you\n have 300 seconds to use that value. If you specify an expired token in a call to\n ListShards
, you get ExpiredNextTokenException
.
Specify this parameter to indicate that you want to list the shards starting with the\n shard whose ID immediately follows ExclusiveStartShardId
.
If you don't specify this parameter, the default behavior is for\n ListShards
to list the shards starting with the first one in the\n stream.
You cannot specify this parameter if you specify NextToken
.
Specify this parameter to indicate that you want to list the shards starting with the\n shard whose ID immediately follows ExclusiveStartShardId
.
If you don't specify this parameter, the default behavior is for\n ListShards
to list the shards starting with the first one in the\n stream.
You cannot specify this parameter if you specify NextToken
.
The maximum number of shards to return in a single call to ListShards
.\n The default value is 1000. If\n you specify a value greater than 1000, at most 1000 results are returned.
When the number of shards to be listed is greater than the value of\n MaxResults
, the response contains a NextToken
value that\n you can use in a subsequent call to ListShards
to list the next set of\n shards.
The maximum number of shards to return in a single call to ListShards
.\n The default value is 1000. If\n you specify a value greater than 1000, at most 1000 results are returned.
When the number of shards to be listed is greater than the value of\n MaxResults
, the response contains a NextToken
value that\n you can use in a subsequent call to ListShards
to list the next set of\n shards.
Specify this input parameter to distinguish data streams that have the same name. For\n example, if you create a data stream and then delete it, and you later create another\n data stream with the same name, you can use this input parameter to specify which of the\n two streams you want to list the shards for.
\nYou cannot specify this parameter if you specify the NextToken
\n parameter.
Specify this input parameter to distinguish data streams that have the same name. For\n example, if you create a data stream and then delete it, and you later create another\n data stream with the same name, you can use this input parameter to specify which of the\n two streams you want to list the shards for.
\nYou cannot specify this parameter if you specify the NextToken
\n parameter.
Enables you to filter out the response of the ListShards
API. You can\n only specify one filter at a time.
If you use the ShardFilter
parameter when invoking the ListShards API,\n the Type
is the required property and must be specified. If you specify the\n AT_TRIM_HORIZON
, FROM_TRIM_HORIZON
, or\n AT_LATEST
types, you do not need to specify either the\n ShardId
or the Timestamp
optional properties.
If you specify the AFTER_SHARD_ID
type, you must also provide the value\n for the optional ShardId
property. The ShardId
property is\n identical in functionality to the ExclusiveStartShardId
parameter of the\n ListShards
API. When the ShardId
property is specified, the\n response includes the shards starting with the shard whose ID immediately follows the\n ShardId
that you provided.
If you specify the AT_TIMESTAMP
or FROM_TIMESTAMP
type,\n you must also provide the value for the optional Timestamp
property. If you\n specify the AT_TIMESTAMP type, then all shards that were open at the provided timestamp\n are returned. If you specify the FROM_TIMESTAMP type, then all shards starting from the\n provided timestamp to TIP are returned.
The ARN of the stream.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } + "smithy.api#documentation": "Enables you to filter out the response of the ListShards
API. You can\n only specify one filter at a time.
If you use the ShardFilter
parameter when invoking the ListShards API,\n the Type
is the required property and must be specified. If you specify the\n AT_TRIM_HORIZON
, FROM_TRIM_HORIZON
, or\n AT_LATEST
types, you do not need to specify either the\n ShardId
or the Timestamp
optional properties.
If you specify the AFTER_SHARD_ID
type, you must also provide the value\n for the optional ShardId
property. The ShardId
property is\n identical in functionality to the ExclusiveStartShardId
parameter of the\n ListShards
API. When the ShardId
property is specified, the\n response includes the shards starting with the shard whose ID immediately follows the\n ShardId
that you provided.
If you specify the AT_TIMESTAMP
or FROM_TIMESTAMP
type,\n you must also provide the value for the optional Timestamp
property. If you\n specify the AT_TIMESTAMP type, then all shards that were open at the provided timestamp\n are returned. If you specify the FROM_TIMESTAMP type, then all shards starting from the\n provided timestamp to TIP are returned.
When the number of shards in the data stream is greater than the default value for the\n MaxResults
parameter, or if you explicitly specify a value for\n MaxResults
that is less than the number of shards in the data stream,\n the response includes a pagination token named NextToken
. You can specify\n this NextToken
value in a subsequent call to ListShards
to\n list the next set of shards. For more information about the use of this pagination token\n when calling the ListShards
operation, see ListShardsInput$NextToken.
Tokens expire after 300 seconds. When you obtain a value for\n NextToken
in the response to a call to ListShards
, you\n have 300 seconds to use that value. If you specify an expired token in a call to\n ListShards
, you get ExpiredNextTokenException
.
When the number of shards in the data stream is greater than the default value for the\n MaxResults
parameter, or if you explicitly specify a value for\n MaxResults
that is less than the number of shards in the data stream,\n the response includes a pagination token named NextToken
. You can specify\n this NextToken
value in a subsequent call to ListShards
to\n list the next set of shards. For more information about the use of this pagination token\n when calling the ListShards
operation, see ListShardsInput$NextToken.
Tokens expire after 300 seconds. When you obtain a value for\n NextToken
in the response to a call to ListShards
, you\n have 300 seconds to use that value. If you specify an expired token in a call to\n ListShards
, you get ExpiredNextTokenException
.
Lists the consumers registered to receive data from a stream using enhanced fan-out,\n and provides information about each consumer.
\nThis operation has a limit of 5 transactions per second per stream.
", + "smithy.api#documentation": "Lists the consumers registered to receive data from a stream using enhanced fan-out,\n and provides information about each consumer.
\nThis operation has a limit of 5 transactions per second per stream.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", "pageSize": "MaxResults" - }, - "smithy.rules#staticContextParams": { - "OperationType": { - "value": "control" - } } } }, @@ -5850,16 +3765,13 @@ "target": "com.amazonaws.kinesis#StreamARN", "traits": { "smithy.api#documentation": "The ARN of the Kinesis data stream for which you want to list the registered\n consumers. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service\n Namespaces.
", - "smithy.api#required": {}, - "smithy.rules#contextParam": { - "name": "StreamARN" - } + "smithy.api#required": {} } }, "NextToken": { "target": "com.amazonaws.kinesis#NextToken", "traits": { - "smithy.api#documentation": "When the number of consumers that are registered with the data stream is greater than\n the default value for the MaxResults
parameter, or if you explicitly\n specify a value for MaxResults
that is less than the number of consumers\n that are registered with the data stream, the response includes a pagination token named\n NextToken
. You can specify this NextToken
value in a\n subsequent call to ListStreamConsumers
to list the next set of registered\n consumers.
Don't specify StreamName
or StreamCreationTimestamp
if you\n specify NextToken
because the latter unambiguously identifies the\n stream.
You can optionally specify a value for the MaxResults
parameter when you\n specify NextToken
. If you specify a MaxResults
value that is\n less than the number of consumers that the operation returns if you don't specify\n MaxResults
, the response will contain a new NextToken
\n value. You can use the new NextToken
value in a subsequent call to the\n ListStreamConsumers
operation to list the next set of consumers.
Tokens expire after 300 seconds. When you obtain a value for\n NextToken
in the response to a call to\n ListStreamConsumers
, you have 300 seconds to use that value. If you\n specify an expired token in a call to ListStreamConsumers
, you get\n ExpiredNextTokenException
.
When the number of consumers that are registered with the data stream is greater than\n the default value for the MaxResults
parameter, or if you explicitly\n specify a value for MaxResults
that is less than the number of consumers\n that are registered with the data stream, the response includes a pagination token named\n NextToken
. You can specify this NextToken
value in a\n subsequent call to ListStreamConsumers
to list the next set of registered\n consumers.
Don't specify StreamName
or StreamCreationTimestamp
if you\n specify NextToken
because the latter unambiguously identifies the\n stream.
You can optionally specify a value for the MaxResults
parameter when you\n specify NextToken
. If you specify a MaxResults
value that is\n less than the number of consumers that the operation returns if you don't specify\n MaxResults
, the response will contain a new NextToken
\n value. You can use the new NextToken
value in a subsequent call to the\n ListStreamConsumers
operation to list the next set of consumers.
Tokens expire after 300 seconds. When you obtain a value for\n NextToken
in the response to a call to\n ListStreamConsumers
, you have 300 seconds to use that value. If you\n specify an expired token in a call to ListStreamConsumers
, you get\n ExpiredNextTokenException
.
Specify this input parameter to distinguish data streams that have the same name. For\n example, if you create a data stream and then delete it, and you later create another\n data stream with the same name, you can use this input parameter to specify which of the\n two streams you want to list the consumers for.
\nYou can't specify this parameter if you specify the NextToken parameter.
" + "smithy.api#documentation": "Specify this input parameter to distinguish data streams that have the same name. For\n example, if you create a data stream and then delete it, and you later create another\n data stream with the same name, you can use this input parameter to specify which of the\n two streams you want to list the consumers for.
\nYou can't specify this parameter if you specify the NextToken parameter.
" } } } @@ -5897,7 +3809,7 @@ "NextToken": { "target": "com.amazonaws.kinesis#NextToken", "traits": { - "smithy.api#documentation": "When the number of consumers that are registered with the data stream is greater than\n the default value for the MaxResults
parameter, or if you explicitly\n specify a value for MaxResults
that is less than the number of registered\n consumers, the response includes a pagination token named NextToken
. You\n can specify this NextToken
value in a subsequent call to\n ListStreamConsumers
to list the next set of registered consumers. For\n more information about the use of this pagination token when calling the\n ListStreamConsumers
operation, see ListStreamConsumersInput$NextToken.
Tokens expire after 300 seconds. When you obtain a value for\n NextToken
in the response to a call to\n ListStreamConsumers
, you have 300 seconds to use that value. If you\n specify an expired token in a call to ListStreamConsumers
, you get\n ExpiredNextTokenException
.
When the number of consumers that are registered with the data stream is greater than\n the default value for the MaxResults
parameter, or if you explicitly\n specify a value for MaxResults
that is less than the number of registered\n consumers, the response includes a pagination token named NextToken
. You\n can specify this NextToken
value in a subsequent call to\n ListStreamConsumers
to list the next set of registered consumers. For\n more information about the use of this pagination token when calling the\n ListStreamConsumers
operation, see ListStreamConsumersInput$NextToken.
Tokens expire after 300 seconds. When you obtain a value for\n NextToken
in the response to a call to\n ListStreamConsumers
, you have 300 seconds to use that value. If you\n specify an expired token in a call to ListStreamConsumers
, you get\n ExpiredNextTokenException
.
Lists your Kinesis data streams.
\nThe number of streams may be too large to return from a single call to\n ListStreams
. You can limit the number of returned streams using the\n Limit
parameter. If you do not specify a value for the\n Limit
parameter, Kinesis Data Streams uses the default limit, which is\n currently 100.
You can detect if there are more streams available to list by using the\n HasMoreStreams
flag from the returned output. If there are more streams\n available, you can request more streams by using the name of the last stream returned by\n the ListStreams
request in the ExclusiveStartStreamName
\n parameter in a subsequent request to ListStreams
. The group of stream names\n returned by the subsequent request is then added to the list. You can continue this\n process until all the stream names have been collected in the list.
\n ListStreams has a limit of five transactions per second per\n account.
", - "smithy.api#paginated": { - "inputToken": "NextToken", - "outputToken": "NextToken", - "pageSize": "Limit" - } + "smithy.api#documentation": "Lists your Kinesis data streams.
\nThe number of streams may be too large to return from a single call to\n ListStreams
. You can limit the number of returned streams using the\n Limit
parameter. If you do not specify a value for the\n Limit
parameter, Kinesis Data Streams uses the default limit, which is\n currently 100.
You can detect if there are more streams available to list by using the\n HasMoreStreams
flag from the returned output. If there are more streams\n available, you can request more streams by using the name of the last stream returned by\n the ListStreams
request in the ExclusiveStartStreamName
\n parameter in a subsequent request to ListStreams
. The group of stream names\n returned by the subsequent request is then added to the list. You can continue this\n process until all the stream names have been collected in the list.
\n ListStreams has a limit of five transactions per second per\n account.
" } }, "com.amazonaws.kinesis#ListStreamsInput": { @@ -5944,12 +3845,6 @@ "traits": { "smithy.api#documentation": "The name of the stream to start the list with.
" } - }, - "NextToken": { - "target": "com.amazonaws.kinesis#NextToken", - "traits": { - "smithy.api#documentation": "" - } } }, "traits": { @@ -5981,18 +3876,6 @@ "smithy.api#documentation": "If set to true
, there are more streams available to list.
Lists the tags for the specified Kinesis data stream. This operation has a limit of\n five transactions per second per account.
\nWhen invoking this API, it is recommended you use the StreamARN
input\n parameter rather than the StreamName
input parameter.
Lists the tags for the specified Kinesis data stream. This operation has a limit of\n five transactions per second per account.
" } }, "com.amazonaws.kinesis#ListTagsForStreamInput": { @@ -6036,7 +3911,8 @@ "StreamName": { "target": "com.amazonaws.kinesis#StreamName", "traits": { - "smithy.api#documentation": "The name of the stream.
" + "smithy.api#documentation": "The name of the stream.
", + "smithy.api#required": {} } }, "ExclusiveStartTagKey": { @@ -6048,16 +3924,7 @@ "Limit": { "target": "com.amazonaws.kinesis#ListTagsForStreamInputLimit", "traits": { - "smithy.api#documentation": "The number of tags to return. If this number is less than the total number of tags\n associated with the stream, HasMoreTags
is set to true
. To\n list additional tags, set ExclusiveStartTagKey
to the last key in the\n response.
The ARN of the stream.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } + "smithy.api#documentation": "The number of tags to return. If this number is less than the total number of tags\n associated with the stream, HasMoreTags
is set to true
. To\n list additional tags, set ExclusiveStartTagKey
to the last key in the\n response.
Merges two adjacent shards in a Kinesis data stream and combines them into a single\n shard to reduce the stream's capacity to ingest and transport data. This API is only\n supported for the data streams with the provisioned capacity mode. Two shards are\n considered adjacent if the union of the hash key ranges for the two shards form a\n contiguous set with no gaps. For example, if you have two shards, one with a hash key\n range of 276...381 and the other with a hash key range of 382...454, then you could\n merge these two shards into a single shard that would have a hash key range of\n 276...454. After the merge, the single child shard receives data for all hash key values\n covered by the two parent shards.
\nWhen invoking this API, it is recommended you use the StreamARN
input\n parameter rather than the StreamName
input parameter.
\n MergeShards
is called when there is a need to reduce the overall capacity\n of a stream because of excess capacity that is not being used. You must specify the\n shard to be merged and the adjacent shard for a stream. For more information about\n merging shards, see Merge Two\n Shards in the Amazon Kinesis Data Streams Developer\n Guide.
If the stream is in the ACTIVE
state, you can call\n MergeShards
. If a stream is in the CREATING
,\n UPDATING
, or DELETING
state, MergeShards
\n returns a ResourceInUseException
. If the specified stream does not exist,\n MergeShards
returns a ResourceNotFoundException
.
You can use DescribeStreamSummary to check the state of the stream,\n which is returned in StreamStatus
.
\n MergeShards
is an asynchronous operation. Upon receiving a\n MergeShards
request, Amazon Kinesis Data Streams immediately returns a\n response and sets the StreamStatus
to UPDATING
. After the\n operation is completed, Kinesis Data Streams sets the StreamStatus
to\n ACTIVE
. Read and write operations continue to work while the stream is\n in the UPDATING
state.
You use DescribeStreamSummary and the ListShards\n APIs to determine the shard IDs that are specified in the MergeShards
\n request.
If you try to operate on too many streams in parallel using CreateStream, DeleteStream, MergeShards
,\n or SplitShard, you receive a LimitExceededException
.
\n MergeShards
has a limit of five transactions per second per account.
Merges two adjacent shards in a Kinesis data stream and combines them into a single\n shard to reduce the stream's capacity to ingest and transport data. Two shards are\n considered adjacent if the union of the hash key ranges for the two shards form a\n contiguous set with no gaps. For example, if you have two shards, one with a hash key\n range of 276...381 and the other with a hash key range of 382...454, then you could\n merge these two shards into a single shard that would have a hash key range of\n 276...454. After the merge, the single child shard receives data for all hash key values\n covered by the two parent shards.
\n\n MergeShards
is called when there is a need to reduce the overall capacity\n of a stream because of excess capacity that is not being used. You must specify the\n shard to be merged and the adjacent shard for a stream. For more information about\n merging shards, see Merge Two\n Shards in the Amazon Kinesis Data Streams Developer\n Guide.
If the stream is in the ACTIVE
state, you can call\n MergeShards
. If a stream is in the CREATING
,\n UPDATING
, or DELETING
state, MergeShards
\n returns a ResourceInUseException
. If the specified stream does not exist,\n MergeShards
returns a ResourceNotFoundException
.
You can use DescribeStreamSummary to check the state of the stream,\n which is returned in StreamStatus
.
\n MergeShards
is an asynchronous operation. Upon receiving a\n MergeShards
request, Amazon Kinesis Data Streams immediately returns a\n response and sets the StreamStatus
to UPDATING
. After the\n operation is completed, Kinesis Data Streams sets the StreamStatus
to\n ACTIVE
. Read and write operations continue to work while the stream is\n in the UPDATING
state.
You use DescribeStreamSummary and the ListShards\n APIs to determine the shard IDs that are specified in the MergeShards
\n request.
If you try to operate on too many streams in parallel using CreateStream, DeleteStream, MergeShards
,\n or SplitShard, you receive a LimitExceededException
.
\n MergeShards
has a limit of five transactions per second per account.
The name of the stream for the merge.
" + "smithy.api#documentation": "The name of the stream for the merge.
", + "smithy.api#required": {} } }, "ShardToMerge": { @@ -6155,15 +4015,6 @@ "smithy.api#documentation": "The shard ID of the adjacent shard for the merge.
", "smithy.api#required": {} } - }, - "StreamARN": { - "target": "com.amazonaws.kinesis#StreamARN", - "traits": { - "smithy.api#documentation": "The ARN of the stream.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } - } } }, "traits": { @@ -6171,56 +4022,42 @@ } }, "com.amazonaws.kinesis#MetricsName": { - "type": "enum", - "members": { - "INCOMING_BYTES": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "IncomingBytes" - } - }, - "INCOMING_RECORDS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "IncomingRecords" - } - }, - "OUTGOING_BYTES": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "OutgoingBytes" - } - }, - "OUTGOING_RECORDS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "OutgoingRecords" - } - }, - "WRITE_PROVISIONED_THROUGHPUT_EXCEEDED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "WriteProvisionedThroughputExceeded" - } - }, - "READ_PROVISIONED_THROUGHPUT_EXCEEDED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ReadProvisionedThroughputExceeded" - } - }, - "ITERATOR_AGE_MILLISECONDS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "IteratorAgeMilliseconds" - } - }, - "ALL": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ALL" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "IncomingBytes", + "name": "INCOMING_BYTES" + }, + { + "value": "IncomingRecords", + "name": "INCOMING_RECORDS" + }, + { + "value": "OutgoingBytes", + "name": "OUTGOING_BYTES" + }, + { + "value": "OutgoingRecords", + "name": "OUTGOING_RECORDS" + }, + { + "value": "WriteProvisionedThroughputExceeded", + "name": "WRITE_PROVISIONED_THROUGHPUT_EXCEEDED" + }, + { + "value": "ReadProvisionedThroughputExceeded", + "name": "READ_PROVISIONED_THROUGHPUT_EXCEEDED" + }, + { + "value": "IteratorAgeMilliseconds", + "name": "ITERATOR_AGE_MILLISECONDS" + }, + { + "value": "ALL", + "name": "ALL" } - } + ] } }, "com.amazonaws.kinesis#MetricsNameList": { @@ -6311,9 +4148,6 @@ "target": "com.amazonaws.kinesis#PutRecordOutput" }, "errors": [ - { - "target": "com.amazonaws.kinesis#AccessDeniedException" - }, { "target": "com.amazonaws.kinesis#InvalidArgumentException" }, @@ -6343,12 +4177,7 @@ } ], "traits": { - "smithy.api#documentation": "Writes a single data record into an Amazon Kinesis data stream. Call\n PutRecord
to send data into the stream for real-time ingestion and\n subsequent processing, one record at a time. Each shard can support writes up to 1,000\n records per second, up to a maximum data write total of 1 MiB per second.
When invoking this API, it is recommended you use the StreamARN
input\n parameter rather than the StreamName
input parameter.
You must specify the name of the stream that captures, stores, and transports the\n data; a partition key; and the data blob itself.
\nThe data blob can be any type of data; for example, a segment from a log file,\n geographic/location data, website clickstream data, and so on.
\nThe partition key is used by Kinesis Data Streams to distribute data across shards.\n Kinesis Data Streams segregates the data records that belong to a stream into multiple\n shards, using the partition key associated with each data record to determine the shard\n to which a given data record belongs.
\nPartition keys are Unicode strings, with a maximum length limit of 256 characters for\n each key. An MD5 hash function is used to map partition keys to 128-bit integer values\n and to map associated data records to shards using the hash key ranges of the shards.\n You can override hashing the partition key to determine the shard by explicitly\n specifying a hash value using the ExplicitHashKey
parameter. For more\n information, see Adding Data to a Stream in the Amazon Kinesis Data Streams\n Developer Guide.
\n PutRecord
returns the shard ID of where the data record was placed and the\n sequence number that was assigned to the data record.
Sequence numbers increase over time and are specific to a shard within a stream, not\n across all shards within a stream. To guarantee strictly increasing ordering, write\n serially to a shard and use the SequenceNumberForOrdering
parameter. For\n more information, see Adding Data to a Stream in the Amazon Kinesis Data Streams\n Developer Guide.
After you write a record to a stream, you cannot modify that record or its order\n within the stream.
\nIf a PutRecord
request cannot be processed because of insufficient\n provisioned throughput on the shard involved in the request, PutRecord
\n throws ProvisionedThroughputExceededException
.
By default, data records are accessible for 24 hours from the time that they are added\n to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.
", - "smithy.rules#staticContextParams": { - "OperationType": { - "value": "data" - } - } + "smithy.api#documentation": "Writes a single data record into an Amazon Kinesis data stream. Call\n PutRecord
to send data into the stream for real-time ingestion and\n subsequent processing, one record at a time. Each shard can support writes up to 1,000\n records per second, up to a maximum data write total of 1 MiB per second.
You must specify the name of the stream that captures, stores, and transports the\n data; a partition key; and the data blob itself.
\nThe data blob can be any type of data; for example, a segment from a log file,\n geographic/location data, website clickstream data, and so on.
\nThe partition key is used by Kinesis Data Streams to distribute data across shards.\n Kinesis Data Streams segregates the data records that belong to a stream into multiple\n shards, using the partition key associated with each data record to determine the shard\n to which a given data record belongs.
\nPartition keys are Unicode strings, with a maximum length limit of 256 characters for\n each key. An MD5 hash function is used to map partition keys to 128-bit integer values\n and to map associated data records to shards using the hash key ranges of the shards.\n You can override hashing the partition key to determine the shard by explicitly\n specifying a hash value using the ExplicitHashKey
parameter. For more\n information, see Adding Data to a Stream in the Amazon Kinesis Data Streams\n Developer Guide.
\n PutRecord
returns the shard ID of where the data record was placed and the\n sequence number that was assigned to the data record.
Sequence numbers increase over time and are specific to a shard within a stream, not\n across all shards within a stream. To guarantee strictly increasing ordering, write\n serially to a shard and use the SequenceNumberForOrdering
parameter. For\n more information, see Adding Data to a Stream in the Amazon Kinesis Data Streams\n Developer Guide.
After you write a record to a stream, you cannot modify that record or its order\n within the stream.
\nIf a PutRecord
request cannot be processed because of insufficient\n provisioned throughput on the shard involved in the request, PutRecord
\n throws ProvisionedThroughputExceededException
.
By default, data records are accessible for 24 hours from the time that they are added\n to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.
" } }, "com.amazonaws.kinesis#PutRecordInput": { @@ -6357,7 +4186,8 @@ "StreamName": { "target": "com.amazonaws.kinesis#StreamName", "traits": { - "smithy.api#documentation": "The name of the stream to put the data record into.
" + "smithy.api#documentation": "The name of the stream to put the data record into.
", + "smithy.api#required": {} } }, "Data": { @@ -6385,15 +4215,6 @@ "traits": { "smithy.api#documentation": "Guarantees strictly increasing sequence numbers, for puts from the same client and to\n the same partition key. Usage: set the SequenceNumberForOrdering
of record\n n to the sequence number of record n-1 (as\n returned in the result when putting record n-1). If this parameter\n is not set, records are coarsely ordered based on arrival time.
The ARN of the stream.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } - } } }, "traits": { @@ -6420,7 +4241,7 @@ "EncryptionType": { "target": "com.amazonaws.kinesis#EncryptionType", "traits": { - "smithy.api#documentation": "The encryption type to use on the record. This parameter can be one of the following\n values:
\n\n NONE
: Do not encrypt the records in the stream.
\n KMS
: Use server-side encryption on the records in the stream\n using a customer-managed Amazon Web Services KMS key.
The encryption type to use on the record. This parameter can be one of the following\n values:
\n\n NONE
: Do not encrypt the records in the stream.
\n KMS
: Use server-side encryption on the records in the stream\n using a customer-managed Amazon Web Services KMS key.
Writes multiple data records into a Kinesis data stream in a single call (also\n referred to as a PutRecords
request). Use this operation to send data into\n the stream for data ingestion and processing.
When invoking this API, it is recommended you use the StreamARN
input\n parameter rather than the StreamName
input parameter.
Each PutRecords
request can support up to 500 records. Each record in the\n request can be as large as 1 MiB, up to a limit of 5 MiB for the entire request,\n including partition keys. Each shard can support writes up to 1,000 records per second,\n up to a maximum data write total of 1 MiB per second.
You must specify the name of the stream that captures, stores, and transports the\n data; and an array of request Records
, with each record in the array\n requiring a partition key and data blob. The record size limit applies to the total size\n of the partition key and data blob.
The data blob can be any type of data; for example, a segment from a log file,\n geographic/location data, website clickstream data, and so on.
\nThe partition key is used by Kinesis Data Streams as input to a hash function that\n maps the partition key and associated data to a specific shard. An MD5 hash function is\n used to map partition keys to 128-bit integer values and to map associated data records\n to shards. As a result of this hashing mechanism, all data records with the same\n partition key map to the same shard within the stream. For more information, see Adding Data to a Stream in the Amazon Kinesis Data Streams\n Developer Guide.
\nEach record in the Records
array may include an optional parameter,\n ExplicitHashKey
, which overrides the partition key to shard mapping.\n This parameter allows a data producer to determine explicitly the shard where the record\n is stored. For more information, see Adding Multiple Records with PutRecords in the Amazon Kinesis\n Data Streams Developer Guide.
The PutRecords
response includes an array of response\n Records
. Each record in the response array directly correlates with a\n record in the request array using natural ordering, from the top to the bottom of the\n request and response. The response Records
array always includes the same\n number of records as the request array.
The response Records
array includes both successfully and unsuccessfully\n processed records. Kinesis Data Streams attempts to process all records in each\n PutRecords
request. A single record failure does not stop the\n processing of subsequent records. As a result, PutRecords doesn't guarantee the ordering\n of records. If you need to read records in the same order they are written to the\n stream, use PutRecord instead of PutRecords
, and write to\n the same shard.
A successfully processed record includes ShardId
and\n SequenceNumber
values. The ShardId
parameter identifies\n the shard in the stream where the record is stored. The SequenceNumber
\n parameter is an identifier assigned to the put record, unique to all records in the\n stream.
An unsuccessfully processed record includes ErrorCode
and\n ErrorMessage
values. ErrorCode
reflects the type of error\n and can be one of the following values:\n ProvisionedThroughputExceededException
or InternalFailure
.\n ErrorMessage
provides more detailed information about the\n ProvisionedThroughputExceededException
exception including the account\n ID, stream name, and shard ID of the record that was throttled. For more information\n about partially successful responses, see Adding Multiple Records with PutRecords in the Amazon Kinesis\n Data Streams Developer Guide.
After you write a record to a stream, you cannot modify that record or its order\n within the stream.
\nBy default, data records are accessible for 24 hours from the time that they are added\n to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.
", - "smithy.rules#staticContextParams": { - "OperationType": { - "value": "data" - } - } + "smithy.api#documentation": "Writes multiple data records into a Kinesis data stream in a single call (also\n referred to as a PutRecords
request). Use this operation to send data into\n the stream for data ingestion and processing.
Each PutRecords
request can support up to 500 records. Each record in the\n request can be as large as 1 MiB, up to a limit of 5 MiB for the entire request,\n including partition keys. Each shard can support writes up to 1,000 records per second,\n up to a maximum data write total of 1 MiB per second.
You must specify the name of the stream that captures, stores, and transports the\n data; and an array of request Records
, with each record in the array\n requiring a partition key and data blob. The record size limit applies to the total size\n of the partition key and data blob.
The data blob can be any type of data; for example, a segment from a log file,\n geographic/location data, website clickstream data, and so on.
\nThe partition key is used by Kinesis Data Streams as input to a hash function that\n maps the partition key and associated data to a specific shard. An MD5 hash function is\n used to map partition keys to 128-bit integer values and to map associated data records\n to shards. As a result of this hashing mechanism, all data records with the same\n partition key map to the same shard within the stream. For more information, see Adding Data to a Stream in the Amazon Kinesis Data Streams\n Developer Guide.
\nEach record in the Records
array may include an optional parameter,\n ExplicitHashKey
, which overrides the partition key to shard mapping.\n This parameter allows a data producer to determine explicitly the shard where the record\n is stored. For more information, see Adding Multiple Records with PutRecords in the Amazon Kinesis\n Data Streams Developer Guide.
The PutRecords
response includes an array of response\n Records
. Each record in the response array directly correlates with a\n record in the request array using natural ordering, from the top to the bottom of the\n request and response. The response Records
array always includes the same\n number of records as the request array.
The response Records
array includes both successfully and unsuccessfully\n processed records. Kinesis Data Streams attempts to process all records in each\n PutRecords
request. A single record failure does not stop the\n processing of subsequent records. As a result, PutRecords doesn't guarantee the ordering\n of records. If you need to read records in the same order they are written to the\n stream, use PutRecord instead of PutRecords
, and write to\n the same shard.
A successfully processed record includes ShardId
and\n SequenceNumber
values. The ShardId
parameter identifies\n the shard in the stream where the record is stored. The SequenceNumber
\n parameter is an identifier assigned to the put record, unique to all records in the\n stream.
An unsuccessfully processed record includes ErrorCode
and\n ErrorMessage
values. ErrorCode
reflects the type of error\n and can be one of the following values:\n ProvisionedThroughputExceededException
or InternalFailure
.\n ErrorMessage
provides more detailed information about the\n ProvisionedThroughputExceededException
exception including the account\n ID, stream name, and shard ID of the record that was throttled. For more information\n about partially successful responses, see Adding Multiple Records with PutRecords in the Amazon Kinesis\n Data Streams Developer Guide.
After you write a record to a stream, you cannot modify that record or its order\n within the stream.
\nBy default, data records are accessible for 24 hours from the time that they are added\n to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.
" } }, "com.amazonaws.kinesis#PutRecordsInput": { @@ -6490,16 +4303,8 @@ "StreamName": { "target": "com.amazonaws.kinesis#StreamName", "traits": { - "smithy.api#documentation": "The stream name associated with the request.
" - } - }, - "StreamARN": { - "target": "com.amazonaws.kinesis#StreamARN", - "traits": { - "smithy.api#documentation": "The ARN of the stream.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } + "smithy.api#documentation": "The stream name associated with the request.
", + "smithy.api#required": {} } } }, @@ -6526,7 +4331,7 @@ "EncryptionType": { "target": "com.amazonaws.kinesis#EncryptionType", "traits": { - "smithy.api#documentation": "The encryption type used on the records. This parameter can be one of the following\n values:
\n\n NONE
: Do not encrypt the records.
\n KMS
: Use server-side encryption on the records using a\n customer-managed Amazon Web Services KMS key.
The encryption type used on the records. This parameter can be one of the following\n values:
\n\n NONE
: Do not encrypt the records.
\n KMS
: Use server-side encryption on the records using a\n customer-managed Amazon Web Services KMS key.
The encryption type used on the record. This parameter can be one of the following\n values:
\n\n NONE
: Do not encrypt the records in the stream.
\n KMS
: Use server-side encryption on the records in the stream\n using a customer-managed Amazon Web Services KMS key.
The encryption type used on the record. This parameter can be one of the following\n values:
\n\n NONE
: Do not encrypt the records in the stream.
\n KMS
: Use server-side encryption on the records in the stream\n using a customer-managed Amazon Web Services KMS key.
Registers a consumer with a Kinesis data stream. When you use this operation, the\n consumer you register can then call SubscribeToShard to receive data\n from the stream using enhanced fan-out, at a rate of up to 2 MiB per second for every\n shard you subscribe to. This rate is unaffected by the total number of consumers that\n read from the same stream.
\nYou can register up to 20 consumers per stream. A given consumer can only be\n registered with one stream at a time.
\nFor an example of how to use this operations, see Enhanced Fan-Out\n Using the Kinesis Data Streams API.
\nThe use of this operation has a limit of five transactions per second per account.\n Also, only 5 consumers can be created simultaneously. In other words, you cannot have\n more than 5 consumers in a CREATING
status at the same time. Registering a\n 6th consumer while there are 5 in a CREATING
status results in a\n LimitExceededException
.
Registers a consumer with a Kinesis data stream. When you use this operation, the\n consumer you register can then call SubscribeToShard to receive data\n from the stream using enhanced fan-out, at a rate of up to 2 MiB per second for every\n shard you subscribe to. This rate is unaffected by the total number of consumers that\n read from the same stream.
\nYou can register up to 20 consumers per stream. A given consumer can only be\n registered with one stream at a time.
\nFor an example of how to use this operations, see Enhanced Fan-Out\n Using the Kinesis Data Streams API.
\nThe use of this operation has a limit of five transactions per second per account.\n Also, only 5 consumers can be created simultaneously. In other words, you cannot have\n more than 5 consumers in a CREATING
status at the same time. Registering a\n 6th consumer while there are 5 in a CREATING
status results in a\n LimitExceededException
.
The ARN of the Kinesis data stream that you want to register the consumer with. For\n more info, see Amazon Resource Names (ARNs) and Amazon Web Services Service\n Namespaces.
", - "smithy.api#required": {}, - "smithy.rules#contextParam": { - "name": "StreamARN" - } + "smithy.api#required": {} } }, "ConsumerName": { @@ -6739,9 +4536,6 @@ "target": "smithy.api#Unit" }, "errors": [ - { - "target": "com.amazonaws.kinesis#AccessDeniedException" - }, { "target": "com.amazonaws.kinesis#InvalidArgumentException" }, @@ -6756,12 +4550,7 @@ } ], "traits": { - "smithy.api#documentation": "Removes tags from the specified Kinesis data stream. Removed tags are deleted and\n cannot be recovered after this operation successfully completes.
\nWhen invoking this API, it is recommended you use the StreamARN
input\n parameter rather than the StreamName
input parameter.
If you specify a tag that does not exist, it is ignored.
\n\n RemoveTagsFromStream has a limit of five transactions per second per\n account.
", - "smithy.rules#staticContextParams": { - "OperationType": { - "value": "control" - } - } + "smithy.api#documentation": "Removes tags from the specified Kinesis data stream. Removed tags are deleted and\n cannot be recovered after this operation successfully completes.
\nIf you specify a tag that does not exist, it is ignored.
\n\n RemoveTagsFromStream has a limit of five transactions per second per\n account.
" } }, "com.amazonaws.kinesis#RemoveTagsFromStreamInput": { @@ -6770,7 +4559,8 @@ "StreamName": { "target": "com.amazonaws.kinesis#StreamName", "traits": { - "smithy.api#documentation": "The name of the stream.
" + "smithy.api#documentation": "The name of the stream.
", + "smithy.api#required": {} } }, "TagKeys": { @@ -6779,15 +4569,6 @@ "smithy.api#documentation": "A list of tag keys. Each corresponding tag is removed from the stream.
", "smithy.api#required": {} } - }, - "StreamARN": { - "target": "com.amazonaws.kinesis#StreamARN", - "traits": { - "smithy.api#documentation": "The ARN of the stream.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } - } } }, "traits": { @@ -6828,14 +4609,14 @@ "type": "integer" }, "com.amazonaws.kinesis#ScalingType": { - "type": "enum", - "members": { - "UNIFORM_SCALING": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UNIFORM_SCALING" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "UNIFORM_SCALING", + "name": "UNIFORM_SCALING" } - } + ] } }, "com.amazonaws.kinesis#SequenceNumber": { @@ -6921,7 +4702,7 @@ "Type": { "target": "com.amazonaws.kinesis#ShardFilterType", "traits": { - "smithy.api#documentation": "The shard type specified in the ShardFilter
parameter. This is a required\n property of the ShardFilter
parameter.
You can specify the following valid values:
\n\n AFTER_SHARD_ID
- the response includes all the shards, starting\n with the shard whose ID immediately follows the ShardId
that you\n provided.
\n AT_TRIM_HORIZON
- the response includes all the shards that were\n open at TRIM_HORIZON
.
\n FROM_TRIM_HORIZON
- (default), the response includes all the\n shards within the retention period of the data stream (trim to tip).
\n AT_LATEST
- the response includes only the currently open shards\n of the data stream.
\n AT_TIMESTAMP
- the response includes all shards whose start\n timestamp is less than or equal to the given timestamp and end timestamp is\n greater than or equal to the given timestamp or still open.
\n FROM_TIMESTAMP
- the response incldues all closed shards whose\n end timestamp is greater than or equal to the given timestamp and also all open\n shards. Corrected to TRIM_HORIZON
of the data stream if\n FROM_TIMESTAMP
is less than the TRIM_HORIZON
\n value.
The shard type specified in the ShardFilter
parameter. This is a required\n property of the ShardFilter
parameter.
You can specify the following valid values:
\n\n AFTER_SHARD_ID
- the response includes all the shards, starting\n with the shard whose ID immediately follows the ShardId
that you\n provided.
\n AT_TRIM_HORIZON
- the response includes all the shards that were\n open at TRIM_HORIZON
.
\n FROM_TRIM_HORIZON
- (default), the response includes all the\n shards within the retention period of the data stream (trim to tip).
\n AT_LATEST
- the response includes only the currently open shards\n of the data stream.
\n AT_TIMESTAMP
- the response includes all shards whose start\n timestamp is less than or equal to the given timestamp and end timestamp is\n greater than or equal to the given timestamp or still open.
\n FROM_TIMESTAMP
- the response incldues all closed shards whose\n end timestamp is greater than or equal to the given timestamp and also all open\n shards. Corrected to TRIM_HORIZON
of the data stream if\n FROM_TIMESTAMP
is less than the TRIM_HORIZON
\n value.
Splits a shard into two new shards in the Kinesis data stream, to increase the\n stream's capacity to ingest and transport data. SplitShard
is called when\n there is a need to increase the overall capacity of a stream because of an expected\n increase in the volume of data records being ingested. This API is only supported for\n the data streams with the provisioned capacity mode.
When invoking this API, it is recommended you use the StreamARN
input\n parameter rather than the StreamName
input parameter.
You can also use SplitShard
when a shard appears to be approaching its\n maximum utilization; for example, the producers sending data into the specific shard are\n suddenly sending more than previously anticipated. You can also call\n SplitShard
to increase stream capacity, so that more Kinesis Data\n Streams applications can simultaneously read data from the stream for real-time\n processing.
You must specify the shard to be split and the new hash key, which is the position in\n the shard where the shard gets split in two. In many cases, the new hash key might be\n the average of the beginning and ending hash key, but it can be any hash key value in\n the range being mapped into the shard. For more information, see Split a\n Shard in the Amazon Kinesis Data Streams Developer\n Guide.
\nYou can use DescribeStreamSummary and the ListShards APIs to determine the shard ID and hash key values for the ShardToSplit
\n and NewStartingHashKey
parameters that are specified in the\n SplitShard
request.
\n SplitShard
is an asynchronous operation. Upon receiving a\n SplitShard
request, Kinesis Data Streams immediately returns a response\n and sets the stream status to UPDATING
. After the operation is completed,\n Kinesis Data Streams sets the stream status to ACTIVE
. Read and write\n operations continue to work while the stream is in the UPDATING
state.
You can use DescribeStreamSummary to check the status of the stream,\n which is returned in StreamStatus
. If the stream is in the\n ACTIVE
state, you can call SplitShard
.\n
If the specified stream does not exist, DescribeStreamSummary\n returns a ResourceNotFoundException
. If you try to create more shards than\n are authorized for your account, you receive a LimitExceededException
.
For the default shard limit for an Amazon Web Services account, see Kinesis\n Data Streams Limits in the Amazon Kinesis Data Streams Developer\n Guide. To increase this limit, contact Amazon Web Services\n Support.
\nIf you try to operate on too many streams simultaneously using CreateStream, DeleteStream, MergeShards, and/or SplitShard, you receive a\n LimitExceededException
.
\n SplitShard
has a limit of five transactions per second per account.
Splits a shard into two new shards in the Kinesis data stream, to increase the\n stream's capacity to ingest and transport data. SplitShard
is called when\n there is a need to increase the overall capacity of a stream because of an expected\n increase in the volume of data records being ingested.
You can also use SplitShard
when a shard appears to be approaching its\n maximum utilization; for example, the producers sending data into the specific shard are\n suddenly sending more than previously anticipated. You can also call\n SplitShard
to increase stream capacity, so that more Kinesis Data\n Streams applications can simultaneously read data from the stream for real-time\n processing.
You must specify the shard to be split and the new hash key, which is the position in\n the shard where the shard gets split in two. In many cases, the new hash key might be\n the average of the beginning and ending hash key, but it can be any hash key value in\n the range being mapped into the shard. For more information, see Split a\n Shard in the Amazon Kinesis Data Streams Developer\n Guide.
\nYou can use DescribeStreamSummary and the ListShards APIs to determine the shard ID and hash key values for the ShardToSplit
\n and NewStartingHashKey
parameters that are specified in the\n SplitShard
request.
\n SplitShard
is an asynchronous operation. Upon receiving a\n SplitShard
request, Kinesis Data Streams immediately returns a response\n and sets the stream status to UPDATING
. After the operation is completed,\n Kinesis Data Streams sets the stream status to ACTIVE
. Read and write\n operations continue to work while the stream is in the UPDATING
state.
You can use DescribeStreamSummary to check the status of the stream,\n which is returned in StreamStatus
. If the stream is in the\n ACTIVE
state, you can call SplitShard
.\n
If the specified stream does not exist, DescribeStreamSummary\n returns a ResourceNotFoundException
. If you try to create more shards than\n are authorized for your account, you receive a LimitExceededException
.
For the default shard limit for an Amazon Web Services account, see Kinesis\n Data Streams Limits in the Amazon Kinesis Data Streams Developer\n Guide. To increase this limit, contact Amazon Web Services\n Support.
\nIf you try to operate on too many streams simultaneously using CreateStream, DeleteStream, MergeShards, and/or SplitShard, you receive a\n LimitExceededException
.
\n SplitShard
has a limit of five transactions per second per account.
The name of the stream for the shard split.
" + "smithy.api#documentation": "The name of the stream for the shard split.
", + "smithy.api#required": {} } }, "ShardToSplit": { @@ -7108,15 +4864,6 @@ "smithy.api#documentation": "A hash key value for the starting hash key of one of the child shards created by the\n split. The hash key range for a given shard constitutes a set of ordered contiguous\n positive integers. The value for NewStartingHashKey
must be in the range of\n hash keys being mapped into the shard. The NewStartingHashKey
hash key\n value and all higher hash key values in hash key range are distributed to one of the\n child shards. All the lower hash key values in the range are distributed to the other\n child shard.
The ARN of the stream.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } - } } }, "traits": { @@ -7132,9 +4879,6 @@ "target": "smithy.api#Unit" }, "errors": [ - { - "target": "com.amazonaws.kinesis#AccessDeniedException" - }, { "target": "com.amazonaws.kinesis#InvalidArgumentException" }, @@ -7167,12 +4911,7 @@ } ], "traits": { - "smithy.api#documentation": "Enables or updates server-side encryption using an Amazon Web Services KMS key for a\n specified stream.
\nStarting encryption is an asynchronous operation. Upon receiving the request, Kinesis\n Data Streams returns immediately and sets the status of the stream to\n UPDATING
. After the update is complete, Kinesis Data Streams sets the\n status of the stream back to ACTIVE
. Updating or applying encryption\n normally takes a few seconds to complete, but it can take minutes. You can continue to\n read and write data to your stream while its status is UPDATING
. Once the\n status of the stream is ACTIVE
, encryption begins for records written to\n the stream.
API Limits: You can successfully apply a new Amazon Web Services KMS key for\n server-side encryption 25 times in a rolling 24-hour period.
\nNote: It can take up to 5 seconds after the stream is in an ACTIVE
status\n before all records written to the stream are encrypted. After you enable encryption, you\n can verify that encryption is applied by inspecting the API response from\n PutRecord
or PutRecords
.
When invoking this API, it is recommended you use the StreamARN
input\n parameter rather than the StreamName
input parameter.
Enables or updates server-side encryption using an Amazon Web Services KMS key for a\n specified stream.
\nStarting encryption is an asynchronous operation. Upon receiving the request, Kinesis\n Data Streams returns immediately and sets the status of the stream to\n UPDATING
. After the update is complete, Kinesis Data Streams sets the\n status of the stream back to ACTIVE
. Updating or applying encryption\n normally takes a few seconds to complete, but it can take minutes. You can continue to\n read and write data to your stream while its status is UPDATING
. Once the\n status of the stream is ACTIVE
, encryption begins for records written to\n the stream.
API Limits: You can successfully apply a new Amazon Web Services KMS key for\n server-side encryption 25 times in a rolling 24-hour period.
\nNote: It can take up to 5 seconds after the stream is in an ACTIVE
status\n before all records written to the stream are encrypted. After you enable encryption, you\n can verify that encryption is applied by inspecting the API response from\n PutRecord
or PutRecords
.
The name of the stream for which to start encrypting records.
" + "smithy.api#documentation": "The name of the stream for which to start encrypting records.
", + "smithy.api#required": {} } }, "EncryptionType": { @@ -7194,18 +4934,9 @@ "KeyId": { "target": "com.amazonaws.kinesis#KeyId", "traits": { - "smithy.api#documentation": "The GUID for the customer-managed Amazon Web Services KMS key to use for encryption.\n This value can be a globally unique identifier, a fully specified Amazon Resource Name\n (ARN) to either an alias or a key, or an alias name prefixed by \"alias/\".You can also\n use a master key owned by Kinesis Data Streams by specifying the alias\n aws/kinesis
.
Key ARN example:\n arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
\n
Alias ARN example:\n arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
\n
Globally unique key ID example:\n 12345678-1234-1234-1234-123456789012
\n
Alias name example: alias/MyAliasName
\n
Master key owned by Kinesis Data Streams:\n alias/aws/kinesis
\n
The GUID for the customer-managed Amazon Web Services KMS key to use for encryption.\n This value can be a globally unique identifier, a fully specified Amazon Resource Name\n (ARN) to either an alias or a key, or an alias name prefixed by \"alias/\".You can also\n use a master key owned by Kinesis Data Streams by specifying the alias\n aws/kinesis
.
Key ARN example:\n arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
\n
Alias ARN example:\n arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
\n
Globally unique key ID example:\n 12345678-1234-1234-1234-123456789012
\n
Alias name example: alias/MyAliasName
\n
Master key owned by Kinesis Data Streams:\n alias/aws/kinesis
\n
The ARN of the stream.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } - } } } }, @@ -7215,7 +4946,7 @@ "Type": { "target": "com.amazonaws.kinesis#ShardIteratorType", "traits": { - "smithy.api#documentation": "You can set the starting position to one of the following values:
\n\n AT_SEQUENCE_NUMBER
: Start streaming from the position denoted by the\n sequence number specified in the SequenceNumber
field.
\n AFTER_SEQUENCE_NUMBER
: Start streaming right after the position denoted\n by the sequence number specified in the SequenceNumber
field.
\n AT_TIMESTAMP
: Start streaming from the position denoted by the time stamp\n specified in the Timestamp
field.
\n TRIM_HORIZON
: Start streaming at the last untrimmed record in the shard,\n which is the oldest data record in the shard.
\n LATEST
: Start streaming just after the most recent record in the shard,\n so that you always read the most recent data in the shard.
You can set the starting position to one of the following values:
\n\n AT_SEQUENCE_NUMBER
: Start streaming from the position denoted by the\n sequence number specified in the SequenceNumber
field.
\n AFTER_SEQUENCE_NUMBER
: Start streaming right after the position denoted\n by the sequence number specified in the SequenceNumber
field.
\n AT_TIMESTAMP
: Start streaming from the position denoted by the time stamp\n specified in the Timestamp
field.
\n TRIM_HORIZON
: Start streaming at the last untrimmed record in the shard,\n which is the oldest data record in the shard.
\n LATEST
: Start streaming just after the most recent record in the shard,\n so that you always read the most recent data in the shard.
Disables server-side encryption for a specified stream.
\nWhen invoking this API, it is recommended you use the StreamARN
input\n parameter rather than the StreamName
input parameter.
Stopping encryption is an asynchronous operation. Upon receiving the request, Kinesis\n Data Streams returns immediately and sets the status of the stream to\n UPDATING
. After the update is complete, Kinesis Data Streams sets the\n status of the stream back to ACTIVE
. Stopping encryption normally takes a\n few seconds to complete, but it can take minutes. You can continue to read and write\n data to your stream while its status is UPDATING
. Once the status of the\n stream is ACTIVE
, records written to the stream are no longer encrypted by\n Kinesis Data Streams.
API Limits: You can successfully disable server-side encryption 25 times in a rolling\n 24-hour period.
\nNote: It can take up to 5 seconds after the stream is in an ACTIVE
status\n before all records written to the stream are no longer subject to encryption. After you\n disabled encryption, you can verify that encryption is not applied by inspecting the API\n response from PutRecord
or PutRecords
.
Disables server-side encryption for a specified stream.
\nStopping encryption is an asynchronous operation. Upon receiving the request, Kinesis\n Data Streams returns immediately and sets the status of the stream to\n UPDATING
. After the update is complete, Kinesis Data Streams sets the\n status of the stream back to ACTIVE
. Stopping encryption normally takes a\n few seconds to complete, but it can take minutes. You can continue to read and write\n data to your stream while its status is UPDATING
. Once the status of the\n stream is ACTIVE
, records written to the stream are no longer encrypted by\n Kinesis Data Streams.
API Limits: You can successfully disable server-side encryption 25 times in a rolling\n 24-hour period.
\nNote: It can take up to 5 seconds after the stream is in an ACTIVE
status\n before all records written to the stream are no longer subject to encryption. After you\n disabled encryption, you can verify that encryption is not applied by inspecting the API\n response from PutRecord
or PutRecords
.
The name of the stream on which to stop encrypting records.
" + "smithy.api#documentation": "The name of the stream on which to stop encrypting records.
", + "smithy.api#required": {} } }, "EncryptionType": { @@ -7289,18 +5013,9 @@ "KeyId": { "target": "com.amazonaws.kinesis#KeyId", "traits": { - "smithy.api#documentation": "The GUID for the customer-managed Amazon Web Services KMS key to use for encryption.\n This value can be a globally unique identifier, a fully specified Amazon Resource Name\n (ARN) to either an alias or a key, or an alias name prefixed by \"alias/\".You can also\n use a master key owned by Kinesis Data Streams by specifying the alias\n aws/kinesis
.
Key ARN example:\n arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
\n
Alias ARN example:\n arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
\n
Globally unique key ID example:\n 12345678-1234-1234-1234-123456789012
\n
Alias name example: alias/MyAliasName
\n
Master key owned by Kinesis Data Streams:\n alias/aws/kinesis
\n
The GUID for the customer-managed Amazon Web Services KMS key to use for encryption.\n This value can be a globally unique identifier, a fully specified Amazon Resource Name\n (ARN) to either an alias or a key, or an alias name prefixed by \"alias/\".You can also\n use a master key owned by Kinesis Data Streams by specifying the alias\n aws/kinesis
.
Key ARN example:\n arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
\n
Alias ARN example:\n arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
\n
Globally unique key ID example:\n 12345678-1234-1234-1234-123456789012
\n
Alias name example: alias/MyAliasName
\n
Master key owned by Kinesis Data Streams:\n alias/aws/kinesis
\n
The ARN of the stream.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } - } } } }, @@ -7311,7 +5026,7 @@ "min": 1, "max": 2048 }, - "smithy.api#pattern": "^arn:aws.*:kinesis:.*:\\d{12}:stream/\\S+$" + "smithy.api#pattern": "^arn:aws.*:kinesis:.*:\\d{12}:stream/.+$" } }, "com.amazonaws.kinesis#StreamDescription": { @@ -7334,7 +5049,7 @@ "StreamStatus": { "target": "com.amazonaws.kinesis#StreamStatus", "traits": { - "smithy.api#documentation": "The current status of the stream being described. The stream status is one of the\n following states:
\n\n CREATING
- The stream is being created. Kinesis Data Streams\n immediately returns and sets StreamStatus
to\n CREATING
.
\n DELETING
- The stream is being deleted. The specified stream is in\n the DELETING
state until Kinesis Data Streams completes the\n deletion.
\n ACTIVE
- The stream exists and is ready for read and write\n operations or deletion. You should perform read and write operations only on an\n ACTIVE
stream.
\n UPDATING
- Shards in the stream are being merged or split. Read and\n write operations continue to work while the stream is in the\n UPDATING
state.
The current status of the stream being described. The stream status is one of the\n following states:
\n\n CREATING
- The stream is being created. Kinesis Data Streams\n immediately returns and sets StreamStatus
to\n CREATING
.
\n DELETING
- The stream is being deleted. The specified stream is in\n the DELETING
state until Kinesis Data Streams completes the\n deletion.
\n ACTIVE
- The stream exists and is ready for read and write\n operations or deletion. You should perform read and write operations only on an\n ACTIVE
stream.
\n UPDATING
- Shards in the stream are being merged or split. Read and\n write operations continue to work while the stream is in the\n UPDATING
state.
The server-side encryption type used on the stream. This parameter can be one of the\n following values:
\n\n NONE
: Do not encrypt the records in the stream.
\n KMS
: Use server-side encryption on the records in the stream\n using a customer-managed Amazon Web Services KMS key.
The server-side encryption type used on the stream. This parameter can be one of the\n following values:
\n\n NONE
: Do not encrypt the records in the stream.
\n KMS
: Use server-side encryption on the records in the stream\n using a customer-managed Amazon Web Services KMS key.
The GUID for the customer-managed Amazon Web Services KMS key to use for encryption.\n This value can be a globally unique identifier, a fully specified ARN to either an alias\n or a key, or an alias name prefixed by \"alias/\".You can also use a master key owned by\n Kinesis Data Streams by specifying the alias aws/kinesis
.
Key ARN example:\n arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
\n
Alias ARN example:\n arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
\n
Globally unique key ID example:\n 12345678-1234-1234-1234-123456789012
\n
Alias name example: alias/MyAliasName
\n
Master key owned by Kinesis Data Streams:\n alias/aws/kinesis
\n
The GUID for the customer-managed Amazon Web Services KMS key to use for encryption.\n This value can be a globally unique identifier, a fully specified ARN to either an alias\n or a key, or an alias name prefixed by \"alias/\".You can also use a master key owned by\n Kinesis Data Streams by specifying the alias aws/kinesis
.
Key ARN example:\n arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
\n
Alias ARN example:\n arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
\n
Globally unique key ID example:\n 12345678-1234-1234-1234-123456789012
\n
Alias name example: alias/MyAliasName
\n
Master key owned by Kinesis Data Streams:\n alias/aws/kinesis
\n
The current status of the stream being described. The stream status is one of the\n following states:
\n\n CREATING
- The stream is being created. Kinesis Data Streams\n immediately returns and sets StreamStatus
to\n CREATING
.
\n DELETING
- The stream is being deleted. The specified stream is in\n the DELETING
state until Kinesis Data Streams completes the\n deletion.
\n ACTIVE
- The stream exists and is ready for read and write\n operations or deletion. You should perform read and write operations only on an\n ACTIVE
stream.
\n UPDATING
- Shards in the stream are being merged or split. Read and\n write operations continue to work while the stream is in the\n UPDATING
state.
The current status of the stream being described. The stream status is one of the\n following states:
\n\n CREATING
- The stream is being created. Kinesis Data Streams\n immediately returns and sets StreamStatus
to\n CREATING
.
\n DELETING
- The stream is being deleted. The specified stream is in\n the DELETING
state until Kinesis Data Streams completes the\n deletion.
\n ACTIVE
- The stream exists and is ready for read and write\n operations or deletion. You should perform read and write operations only on an\n ACTIVE
stream.
\n UPDATING
- Shards in the stream are being merged or split. Read and\n write operations continue to work while the stream is in the\n UPDATING
state.
The encryption type used. This value is one of the following:
\n\n KMS
\n
\n NONE
\n
The encryption type used. This value is one of the following:
\n\n KMS
\n
\n NONE
\n
The GUID for the customer-managed Amazon Web Services KMS key to use for encryption.\n This value can be a globally unique identifier, a fully specified ARN to either an alias\n or a key, or an alias name prefixed by \"alias/\".You can also use a master key owned by\n Kinesis Data Streams by specifying the alias aws/kinesis
.
Key ARN example:\n arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
\n
Alias ARN example: \n arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
\n
Globally unique key ID example:\n 12345678-1234-1234-1234-123456789012
\n
Alias name example: alias/MyAliasName
\n
Master key owned by Kinesis Data Streams:\n alias/aws/kinesis
\n
The GUID for the customer-managed Amazon Web Services KMS key to use for encryption.\n This value can be a globally unique identifier, a fully specified ARN to either an alias\n or a key, or an alias name prefixed by \"alias/\".You can also use a master key owned by\n Kinesis Data Streams by specifying the alias aws/kinesis
.
Key ARN example:\n arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
\n
Alias ARN example: \n arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
\n
Globally unique key ID example:\n 12345678-1234-1234-1234-123456789012
\n
Alias name example: alias/MyAliasName
\n
Master key owned by Kinesis Data Streams:\n alias/aws/kinesis
\n
The name of a stream.
", - "smithy.api#required": {} - } - }, - "StreamARN": { - "target": "com.amazonaws.kinesis#StreamARN", - "traits": { - "smithy.api#documentation": "The ARN of the stream.
", - "smithy.api#required": {} - } - }, - "StreamStatus": { - "target": "com.amazonaws.kinesis#StreamStatus", - "traits": { - "smithy.api#documentation": "The status of the stream.
", - "smithy.api#required": {} - } - }, - "StreamModeDetails": { - "target": "com.amazonaws.kinesis#StreamModeDetails" - }, - "StreamCreationTimestamp": { - "target": "com.amazonaws.kinesis#Timestamp", - "traits": { - "smithy.api#documentation": "The timestamp at which the stream was created.
" - } - } - }, + "type": "string", "traits": { - "smithy.api#documentation": "The summary of a stream.
" - } - }, - "com.amazonaws.kinesis#StreamSummaryList": { - "type": "list", - "member": { - "target": "com.amazonaws.kinesis#StreamSummary" + "smithy.api#enum": [ + { + "value": "CREATING", + "name": "CREATING" + }, + { + "value": "DELETING", + "name": "DELETING" + }, + { + "value": "ACTIVE", + "name": "ACTIVE" + }, + { + "value": "UPDATING", + "name": "UPDATING" + } + ] } }, "com.amazonaws.kinesis#SubscribeToShard": { @@ -7607,9 +5270,6 @@ "target": "com.amazonaws.kinesis#SubscribeToShardOutput" }, "errors": [ - { - "target": "com.amazonaws.kinesis#AccessDeniedException" - }, { "target": "com.amazonaws.kinesis#InvalidArgumentException" }, @@ -7624,12 +5284,7 @@ } ], "traits": { - "smithy.api#documentation": "This operation establishes an HTTP/2 connection between the consumer you specify in\n the ConsumerARN
parameter and the shard you specify in the\n ShardId
parameter. After the connection is successfully established,\n Kinesis Data Streams pushes records from the shard to the consumer over this connection.\n Before you call this operation, call RegisterStreamConsumer to\n register the consumer with Kinesis Data Streams.
When the SubscribeToShard
call succeeds, your consumer starts receiving\n events of type SubscribeToShardEvent over the HTTP/2 connection for up\n to 5 minutes, after which time you need to call SubscribeToShard
again to\n renew the subscription if you want to continue to receive records.
You can make one call to SubscribeToShard
per second per registered\n consumer per shard. For example, if you have a 4000 shard stream and two registered\n stream consumers, you can make one SubscribeToShard
request per second for\n each combination of shard and registered consumer, allowing you to subscribe both\n consumers to all 4000 shards in one second.
If you call SubscribeToShard
again with the same ConsumerARN
\n and ShardId
within 5 seconds of a successful call, you'll get a\n ResourceInUseException
. If you call SubscribeToShard
5\n seconds or more after a successful call, the second call takes over the subscription and\n the previous connection expires or fails with a\n ResourceInUseException
.
For an example of how to use this operations, see Enhanced Fan-Out\n Using the Kinesis Data Streams API.
", - "smithy.rules#staticContextParams": { - "OperationType": { - "value": "data" - } - } + "smithy.api#documentation": "This operation establishes an HTTP/2 connection between the consumer you specify in\n the ConsumerARN
parameter and the shard you specify in the\n ShardId
parameter. After the connection is successfully established,\n Kinesis Data Streams pushes records from the shard to the consumer over this connection.\n Before you call this operation, call RegisterStreamConsumer to\n register the consumer with Kinesis Data Streams.
When the SubscribeToShard
call succeeds, your consumer starts receiving\n events of type SubscribeToShardEvent over the HTTP/2 connection for up\n to 5 minutes, after which time you need to call SubscribeToShard
again to\n renew the subscription if you want to continue to receive records.
You can make one call to SubscribeToShard
per second per registered\n consumer per shard. For example, if you have a 4000 shard stream and two registered\n stream consumers, you can make one SubscribeToShard
request per second for\n each combination of shard and registered consumer, allowing you to subscribe both\n consumers to all 4000 shards in one second.
If you call SubscribeToShard
again with the same ConsumerARN
\n and ShardId
within 5 seconds of a successful call, you'll get a\n ResourceInUseException
. If you call SubscribeToShard
5\n seconds or more after a successful call, the second call takes over the subscription and\n the previous connection expires or fails with a\n ResourceInUseException
.
For an example of how to use this operations, see Enhanced Fan-Out\n Using the Kinesis Data Streams API.
" } }, "com.amazonaws.kinesis#SubscribeToShardEvent": { @@ -7719,10 +5374,7 @@ "target": "com.amazonaws.kinesis#ConsumerARN", "traits": { "smithy.api#documentation": "For this parameter, use the value you obtained when you called RegisterStreamConsumer.
", - "smithy.api#required": {}, - "smithy.rules#contextParam": { - "name": "ConsumerARN" - } + "smithy.api#required": {} } }, "ShardId": { @@ -7843,9 +5495,6 @@ "target": "com.amazonaws.kinesis#UpdateShardCountOutput" }, "errors": [ - { - "target": "com.amazonaws.kinesis#AccessDeniedException" - }, { "target": "com.amazonaws.kinesis#InvalidArgumentException" }, @@ -7863,12 +5512,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates the shard count of the specified stream to the specified number of shards.\n This API is only supported for the data streams with the provisioned capacity\n mode.
\nWhen invoking this API, it is recommended you use the StreamARN
input\n parameter rather than the StreamName
input parameter.
Updating the shard count is an asynchronous operation. Upon receiving the request,\n Kinesis Data Streams returns immediately and sets the status of the stream to\n UPDATING
. After the update is complete, Kinesis Data Streams sets the\n status of the stream back to ACTIVE
. Depending on the size of the stream,\n the scaling action could take a few minutes to complete. You can continue to read and\n write data to your stream while its status is UPDATING
.
To update the shard count, Kinesis Data Streams performs splits or merges on\n individual shards. This can cause short-lived shards to be created, in addition to the\n final shards. These short-lived shards count towards your total shard limit for your\n account in the Region.
\nWhen using this operation, we recommend that you specify a target shard count that is\n a multiple of 25% (25%, 50%, 75%, 100%). You can specify any target value within your\n shard limit. However, if you specify a target that isn't a multiple of 25%, the scaling\n action might take longer to complete.
\nThis operation has the following default limits. By default, you cannot do the\n following:
\nScale more than ten times per rolling 24-hour period per stream
\nScale up to more than double your current shard count for a stream
\nScale down below half your current shard count for a stream
\nScale up to more than 10000 shards in a stream
\nScale a stream with more than 10000 shards down unless the result is less than\n 10000 shards
\nScale up to more than the shard limit for your account
\nFor the default limits for an Amazon Web Services account, see Streams\n Limits in the Amazon Kinesis Data Streams Developer\n Guide. To request an increase in the call rate limit, the shard limit for\n this API, or your overall shard limit, use the limits form.
", - "smithy.rules#staticContextParams": { - "OperationType": { - "value": "control" - } - } + "smithy.api#documentation": "Updates the shard count of the specified stream to the specified number of\n shards.
\nUpdating the shard count is an asynchronous operation. Upon receiving the request,\n Kinesis Data Streams returns immediately and sets the status of the stream to\n UPDATING
. After the update is complete, Kinesis Data Streams sets the\n status of the stream back to ACTIVE
. Depending on the size of the stream,\n the scaling action could take a few minutes to complete. You can continue to read and\n write data to your stream while its status is UPDATING
.
To update the shard count, Kinesis Data Streams performs splits or merges on\n individual shards. This can cause short-lived shards to be created, in addition to the\n final shards. These short-lived shards count towards your total shard limit for your\n account in the Region.
\nWhen using this operation, we recommend that you specify a target shard count that is\n a multiple of 25% (25%, 50%, 75%, 100%). You can specify any target value within your\n shard limit. However, if you specify a target that isn't a multiple of 25%, the scaling\n action might take longer to complete.
\nThis operation has the following default limits. By default, you cannot do the\n following:
\nScale more than ten times per rolling 24-hour period per stream
\nScale up to more than double your current shard count for a stream
\nScale down below half your current shard count for a stream
\nScale up to more than 10000 shards in a stream
\nScale a stream with more than 10000 shards down unless the result is less than\n 10000 shards
\nScale up to more than the shard limit for your account
\nFor the default limits for an Amazon Web Services account, see Streams\n Limits in the Amazon Kinesis Data Streams Developer\n Guide. To request an increase in the call rate limit, the shard limit for\n this API, or your overall shard limit, use the limits form.
" } }, "com.amazonaws.kinesis#UpdateShardCountInput": { @@ -7877,13 +5521,14 @@ "StreamName": { "target": "com.amazonaws.kinesis#StreamName", "traits": { - "smithy.api#documentation": "The name of the stream.
" + "smithy.api#documentation": "The name of the stream.
", + "smithy.api#required": {} } }, "TargetShardCount": { "target": "com.amazonaws.kinesis#PositiveIntegerObject", "traits": { - "smithy.api#documentation": "The new number of shards. This value has the following default limits. By default, you\n cannot do the following:
\nSet this value to more than double your current shard count for a\n stream.
\nSet this value below half your current shard count for a stream.
\nSet this value to more than 10000 shards in a stream (the default limit for\n shard count per stream is 10000 per account per region), unless you request a\n limit increase.
\nScale a stream with more than 10000 shards down unless you set this value to\n less than 10000 shards.
\nThe new number of shards. This value has the following default limits. By default, you\n cannot do the following:
\nSet this value to more than double your current shard count for a\n stream.
\nSet this value below half your current shard count for a stream.
\nSet this value to more than 10000 shards in a stream (the default limit for\n shard count per stream is 10000 per account per region), unless you request a\n limit increase.
\nScale a stream with more than 10000 shards down unless you set this value to\n less than 10000 shards.
\nThe scaling type. Uniform scaling creates shards of equal size.
", "smithy.api#required": {} } - }, - "StreamARN": { - "target": "com.amazonaws.kinesis#StreamARN", - "traits": { - "smithy.api#documentation": "The ARN of the stream.
", - "smithy.rules#contextParam": { - "name": "StreamARN" - } - } } } }, @@ -7925,12 +5561,6 @@ "traits": { "smithy.api#documentation": "The updated number of shards.
" } - }, - "StreamARN": { - "target": "com.amazonaws.kinesis#StreamARN", - "traits": { - "smithy.api#documentation": "The ARN of the stream.
" - } } } }, @@ -7957,12 +5587,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates the capacity mode of the data stream. Currently, in Kinesis Data Streams, you\n can choose between an on-demand capacity mode and a\n provisioned capacity mode for your data stream.\n
", - "smithy.rules#staticContextParams": { - "OperationType": { - "value": "control" - } - } + "smithy.api#documentation": "Updates the capacity mode of the data stream. Currently, in Kinesis Data Streams, you\n can choose between an on-demand capacity mode and a\n provisioned capacity mode for your data stream.\n
" } }, "com.amazonaws.kinesis#UpdateStreamModeInput": { @@ -7972,10 +5597,7 @@ "target": "com.amazonaws.kinesis#StreamARN", "traits": { "smithy.api#documentation": "Specifies the ARN of the data stream whose capacity mode you want to update.
", - "smithy.api#required": {}, - "smithy.rules#contextParam": { - "name": "StreamARN" - } + "smithy.api#required": {} } }, "StreamModeDetails": { @@ -7995,7 +5617,7 @@ } }, "traits": { - "smithy.api#documentation": "Specifies that you tried to invoke this API for a data stream with the on-demand\n capacity mode. This API is only supported for data streams with the provisioned capacity\n mode.
", + "smithy.api#documentation": "\n \n
", "smithy.api#error": "client" } } diff --git a/codegen/sdk/aws-models/license-manager-user-subscriptions.json b/codegen/sdk/aws-models/license-manager-user-subscriptions.json index 73d2109619a..453ca482ffe 100644 --- a/codegen/sdk/aws-models/license-manager-user-subscriptions.json +++ b/codegen/sdk/aws-models/license-manager-user-subscriptions.json @@ -2900,4 +2900,4 @@ } } } -} +} \ No newline at end of file diff --git a/codegen/sdk/aws-models/location.json b/codegen/sdk/aws-models/location.json index 843475d43a1..ed6a7031ed8 100644 --- a/codegen/sdk/aws-models/location.json +++ b/codegen/sdk/aws-models/location.json @@ -3447,7 +3447,7 @@ "FontStack": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "A comma-separated list of fonts to load glyphs from in order of preference. For\n example, Noto Sans Regular, Arial Unicode
.
Valid fonts stacks for Esri styles:
\nVectorEsriDarkGrayCanvas – Ubuntu Medium Italic
| Ubuntu\n Medium
| Ubuntu Italic
| Ubuntu Regular
|\n Ubuntu Bold
\n
VectorEsriLightGrayCanvas – Ubuntu Italic
| Ubuntu\n Regular
| Ubuntu Light
| Ubuntu Bold
\n
VectorEsriTopographic – Noto Sans Italic
| Noto Sans\n Regular
| Noto Sans Bold
| Noto Serif\n Regular
| Roboto Condensed Light Italic
\n
VectorEsriStreets – Arial Regular
| Arial Italic
|\n Arial Bold
\n
VectorEsriNavigation – Arial Regular
| Arial Italic
\n | Arial Bold
\n
Valid font stacks for HERE Technologies styles:
\nVectorHereContrast – Fira \n GO Regular
| Fira GO Bold
\n
VectorHereExplore, VectorHereExploreTruck, HybridHereExploreSatellite – \n Fira GO Italic
| Fira GO Map
| \n Fira GO Map Bold
| Noto Sans CJK JP Bold
| \n Noto Sans CJK JP Light
| \n Noto Sans CJK JP Regular
\n
Valid font stacks for Open Data (Preview) styles:
\nVectorOpenDataStandardLight – \n Amazon Ember Regular,Noto Sans Regular
|\n Amazon Ember Bold,Noto Sans Bold
| \n Amazon Ember Medium,Noto Sans Medium
|\n Amazon Ember Regular Italic,Noto Sans Italic
| \n Amazon Ember Condensed RC Regular,Noto Sans Regular
| \n Amazon Ember Condensed RC Bold,Noto Sans Bold
\n
The fonts used by VectorOpenDataStandardLight
are combined fonts\n that use Amazon Ember
for most glyphs but Noto Sans
\n for glyphs unsupported by Amazon Ember
.
A comma-separated list of fonts to load glyphs from in order of preference. For\n example, Noto Sans Regular, Arial Unicode
.
Valid fonts stacks for Esri styles:
\nVectorEsriDarkGrayCanvas – Ubuntu Medium Italic
| Ubuntu\n Medium
| Ubuntu Italic
| Ubuntu Regular
|\n Ubuntu Bold
\n
VectorEsriLightGrayCanvas – Ubuntu Italic
| Ubuntu\n Regular
| Ubuntu Light
| Ubuntu Bold
\n
VectorEsriTopographic – Noto Sans Italic
| Noto Sans\n Regular
| Noto Sans Bold
| Noto Serif\n Regular
| Roboto Condensed Light Italic
\n
VectorEsriStreets – Arial Regular
| Arial Italic
|\n Arial Bold
\n
VectorEsriNavigation – Arial Regular
| Arial Italic
\n | Arial Bold
\n
Valid font stacks for HERE Technologies styles:
\nVectorHereContrast – Fira \n GO Regular
| Fira GO Bold
\n
VectorHereExplore, VectorHereExploreTruck, HybridHereExploreSatellite – \n Fira GO Italic
| Fira GO Map
| \n Fira GO Map Bold
| Noto Sans CJK JP Bold
| \n Noto Sans CJK JP Light
| \n Noto Sans CJK JP Regular
\n
Specifies the map style selected from an available data provider.
\nValid Esri map styles:
\n\n VectorEsriDarkGrayCanvas
– The Esri Dark Gray Canvas map style. A\n vector basemap with a dark gray, neutral background with minimal colors, labels,\n and features that's designed to draw attention to your thematic content.
\n RasterEsriImagery
– The Esri Imagery map style. A raster basemap\n that provides one meter or better satellite and aerial imagery in many parts of\n the world and lower resolution satellite imagery worldwide.
\n VectorEsriLightGrayCanvas
– The Esri Light Gray Canvas map style,\n which provides a detailed vector basemap with a light gray, neutral background\n style with minimal colors, labels, and features that's designed to draw\n attention to your thematic content.
\n VectorEsriTopographic
– The Esri Light map style, which provides\n a detailed vector basemap with a classic Esri map style.
\n VectorEsriStreets
– The Esri World Streets map style, which\n provides a detailed vector basemap for the world symbolized with a classic Esri\n street map style. The vector tile layer is similar in content and style to the\n World Street Map raster map.
\n VectorEsriNavigation
– The Esri World Navigation map style, which\n provides a detailed basemap for the world symbolized with a custom navigation\n map style that's designed for use during the day in mobile devices.
Valid HERE\n Technologies map styles:
\n\n VectorHereContrast
– The HERE Contrast (Berlin) map style is a \n high contrast\n detailed base map of the world that blends 3D and 2D rendering.
The VectorHereContrast
style has been renamed from \n VectorHereBerlin
. \n VectorHereBerlin
has been deprecated, but will continue to work in \n applications that use it.
\n VectorHereExplore
– A default HERE map style containing a \n neutral, global map and its features including roads, buildings, landmarks, \n and water features. It also now includes a fully designed map of Japan.
\n VectorHereExploreTruck
– A global map containing truck \n restrictions and attributes (e.g. width / height / HAZMAT) symbolized with \n highlighted segments and icons on top of HERE Explore to support use cases \n within transport and logistics.
\n RasterHereExploreSatellite
– A global map containing high\n resolution satellite imagery.
\n HybridHereExploreSatellite
– A global map displaying the road \n network, street names, and city labels over satellite imagery. This style \n will automatically retrieve both raster and vector tiles, and your charges \n will be based on total tiles retrieved.
Hybrid styles use both vector and raster tiles when rendering the \n map that you see. This means that more tiles are retrieved than when using \n either vector or raster tiles alone. Your charges will include all tiles \n retrieved.
\nValid Open Data (Preview) map styles:
\n\n VectorOpenDataStandardLight
– The Open Data Standard Light \n (preview) map style provides a detailed basemap for the world suitable for\n website and mobile application use. The map includes highways major roads, \n minor roads, railways, water features, cities, parks, landmarks, building\n footprints, and administrative boundaries.
Open Data maps is in preview. We may add, change, or remove \n features before announcing general availability. For more information, see\n Open Data is in preview release.
\nSpecifies the map style selected from an available data provider.
\nValid Esri map styles:
\n\n VectorEsriDarkGrayCanvas
– The Esri Dark Gray Canvas map style. A\n vector basemap with a dark gray, neutral background with minimal colors, labels,\n and features that's designed to draw attention to your thematic content.
\n RasterEsriImagery
– The Esri Imagery map style. A raster basemap\n that provides one meter or better satellite and aerial imagery in many parts of\n the world and lower resolution satellite imagery worldwide.
\n VectorEsriLightGrayCanvas
– The Esri Light Gray Canvas map style,\n which provides a detailed vector basemap with a light gray, neutral background\n style with minimal colors, labels, and features that's designed to draw\n attention to your thematic content.
\n VectorEsriTopographic
– The Esri Light map style, which provides\n a detailed vector basemap with a classic Esri map style.
\n VectorEsriStreets
– The Esri World Streets map style, which\n provides a detailed vector basemap for the world symbolized with a classic Esri\n street map style. The vector tile layer is similar in content and style to the\n World Street Map raster map.
\n VectorEsriNavigation
– The Esri World Navigation map style, which\n provides a detailed basemap for the world symbolized with a custom navigation\n map style that's designed for use during the day in mobile devices.
Valid HERE\n Technologies map styles:
\n\n VectorHereContrast
– The HERE Contrast (Berlin) map style is a high contrast\n detailed base map of the world that blends 3D and 2D rendering.
The VectorHereContrast
style has been renamed from VectorHereBerlin
. \n VectorHereBerlin
has been deprecated, but will continue to work in \n applications that use it.
\n VectorHereExplore
– A default HERE map style containing a \n neutral, global map and its features including roads, buildings, landmarks, \n and water features. It also now includes a fully designed map of Japan.
\n VectorHereExploreTruck
– A global map containing truck \n restrictions and attributes (e.g. width / height / HAZMAT) symbolized with \n highlighted segments and icons on top of HERE Explore to support use cases \n within transport and logistics.
\n RasterHereExploreSatellite
– A global map containing high\n resolution satellite imagery.
\n HybridHereExploreSatellite
– A global map displaying the road \n network, street names, and city labels over satellite imagery. This style \n will automatically retrieve both raster and vector tiles, and your charges \n will be based on total tiles retrieved.
Hybrid styles use both vector and raster tiles when rendering the \n map that you see. This means that more tiles are retrieved than when using \n either vector or raster tiles alone. Your charges will include all tiles \n retrieved.
\nThe name of the ML model used by the inference scheduler to be listed.
" } - }, - "Status": { - "target": "com.amazonaws.lookoutequipment#InferenceSchedulerStatus", - "traits": { - "smithy.api#documentation": "Specifies the current status of the inference schedulers to list.
" - } } } }, @@ -4349,26 +4308,22 @@ } }, "com.amazonaws.lookoutequipment#ModelStatus": { - "type": "enum", - "members": { - "IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "IN_PROGRESS" - } - }, - "SUCCESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SUCCESS" - } - }, - "FAILED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "FAILED" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "IN_PROGRESS", + "name": "IN_PROGRESS" + }, + { + "value": "SUCCESS", + "name": "SUCCESS" + }, + { + "value": "FAILED", + "name": "FAILED" } - } + ] } }, "com.amazonaws.lookoutequipment#ModelSummaries": { @@ -4443,26 +4398,22 @@ } }, "com.amazonaws.lookoutequipment#Monotonicity": { - "type": "enum", - "members": { - "DECREASING": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DECREASING" - } - }, - "INCREASING": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "INCREASING" - } - }, - "STATIC": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STATIC" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "DECREASING", + "name": "DECREASING" + }, + { + "value": "INCREASING", + "name": "INCREASING" + }, + { + "value": "STATIC", + "name": "STATIC" } - } + ] } }, "com.amazonaws.lookoutequipment#MultipleOperatingModes": { @@ -4877,20 +4828,18 @@ } }, "com.amazonaws.lookoutequipment#StatisticalIssueStatus": { - "type": "enum", - "members": { - "POTENTIAL_ISSUE_DETECTED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "POTENTIAL_ISSUE_DETECTED" - } - }, - "NO_ISSUE_DETECTED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NO_ISSUE_DETECTED" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "POTENTIAL_ISSUE_DETECTED", + "name": "POTENTIAL_ISSUE_DETECTED" + }, + { + "value": "NO_ISSUE_DETECTED", + "name": "NO_ISSUE_DETECTED" } - } + ] } }, "com.amazonaws.lookoutequipment#StopInferenceScheduler": { @@ -5114,74 +5063,54 @@ } }, "com.amazonaws.lookoutequipment#TargetSamplingRate": { - "type": "enum", - "members": { - "PT1S": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PT1S" - } - }, - "PT5S": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PT5S" - } - }, - "PT10S": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PT10S" - } - }, - "PT15S": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PT15S" - } - }, - "PT30S": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PT30S" - } - }, - "PT1M": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PT1M" - } - }, - "PT5M": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PT5M" - } - }, - "PT10M": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PT10M" - } - }, - "PT15M": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PT15M" - } - }, - "PT30M": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PT30M" - } - }, - "PT1H": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PT1H" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PT1S", + "name": "PT1S" + }, + { + "value": "PT5S", + "name": "PT5S" + }, + { + "value": "PT10S", + "name": "PT10S" + }, + { + "value": "PT15S", + "name": "PT15S" + }, + { + "value": 
"PT30S", + "name": "PT30S" + }, + { + "value": "PT1M", + "name": "PT1M" + }, + { + "value": "PT5M", + "name": "PT5M" + }, + { + "value": "PT10M", + "name": "PT10M" + }, + { + "value": "PT15M", + "name": "PT15M" + }, + { + "value": "PT30M", + "name": "PT30M" + }, + { + "value": "PT1H", + "name": "PT1H" } - } + ] } }, "com.amazonaws.lookoutequipment#ThrottlingException": { diff --git a/codegen/sdk/aws-models/m2.json b/codegen/sdk/aws-models/m2.json index 0dbb9987c88..462b0530f55 100644 --- a/codegen/sdk/aws-models/m2.json +++ b/codegen/sdk/aws-models/m2.json @@ -206,10 +206,6 @@ { "value": "Deleting", "name": "DELETING" - }, - { - "value": "Deleting From Environment", - "name": "DELETING_FROM_ENVIRONMENT" } ] } @@ -281,7 +277,7 @@ "lastStartTime": { "target": "com.amazonaws.m2#Timestamp", "traits": { - "smithy.api#documentation": "The timestamp when you last started the application. Null until the application runs for the first time.
" + "smithy.api#documentation": "The timestamp when the application was last started. Null until the application has\n started running for the first time.
" } }, "versionStatus": { @@ -293,7 +289,7 @@ "deploymentStatus": { "target": "com.amazonaws.m2#ApplicationDeploymentLifecycle", "traits": { - "smithy.api#documentation": "Indicates either an ongoing deployment or if the application has ever deployed\n successfully.
" + "smithy.api#documentation": "Indicates whether there is an ongoing deployment or if the application has ever deployed\n successfully.
" } } }, @@ -452,7 +448,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -501,6 +497,15 @@ "ref": "Endpoint" } ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" } ], "type": "tree", @@ -755,58 +760,6 @@ }, "smithy.rules#endpointTests": { "testCases": [ - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://m2-fips.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region ap-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://m2-fips.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://m2.ap-south-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-south-1" - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://m2.ap-south-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-south-1" - } - }, { "documentation": "For region ca-central-1 with FIPS enabled and DualStack enabled", "expect": { @@ -815,9 +768,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ca-central-1", "UseDualStack": true, - "Region": "ca-central-1" + "UseFIPS": true } }, { @@ -828,9 +781,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ca-central-1", "UseDualStack": false, - "Region": "ca-central-1" + "UseFIPS": true } }, { @@ -841,9 +794,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ca-central-1", "UseDualStack": true, - "Region": "ca-central-1" + "UseFIPS": false } }, { @@ -854,9 +807,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ca-central-1", "UseDualStack": false, - "Region": "ca-central-1" + "UseFIPS": false } }, { @@ -867,9 +820,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-central-1", "UseDualStack": true, - "Region": "eu-central-1" + "UseFIPS": true } }, { @@ -880,9 +833,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-central-1", "UseDualStack": false, - "Region": "eu-central-1" + "UseFIPS": true } }, { @@ -893,9 +846,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-central-1", "UseDualStack": true, - "Region": "eu-central-1" + "UseFIPS": false } }, { @@ -906,9 +859,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-central-1", "UseDualStack": false, - "Region": "eu-central-1" + "UseFIPS": false } }, { @@ -919,9 +872,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-west-2", "UseDualStack": true, - "Region": "us-west-2" + "UseFIPS": true } }, { @@ -932,9 +885,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-west-2", "UseDualStack": false, - "Region": "us-west-2" + "UseFIPS": true } }, { @@ -945,9 +898,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-west-2", "UseDualStack": true, - "Region": "us-west-2" + "UseFIPS": false } }, { @@ -958,113 +911,9 @@ } }, "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "us-west-2" - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack enabled", - "expect": { - 
"endpoint": { - "url": "https://m2-fips.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://m2-fips.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://m2.eu-west-3.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://m2.eu-west-3.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-3" - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://m2-fips.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://m2-fips.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://m2.eu-west-2.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "eu-west-2" - } - }, - { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://m2.eu-west-2.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, + "Region": "us-west-2", "UseDualStack": false, - "Region": "eu-west-2" + "UseFIPS": false } }, { @@ -1075,9 +924,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-west-1", "UseDualStack": true, - "Region": "eu-west-1" + "UseFIPS": true } }, { @@ -1088,9 +937,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-west-1", "UseDualStack": false, - "Region": "eu-west-1" + "UseFIPS": true } }, { @@ -1101,9 +950,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-west-1", "UseDualStack": true, - "Region": "eu-west-1" + "UseFIPS": false } }, { @@ -1114,61 +963,9 @@ } }, "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "eu-west-1" - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://m2-fips.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-northeast-1" - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://m2-fips.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": false, - "Region": "ap-northeast-1" - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://m2.ap-northeast-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-northeast-1" - } - }, - { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - 
"expect": { - "endpoint": { - "url": "https://m2.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, + "Region": "eu-west-1", "UseDualStack": false, - "Region": "ap-northeast-1" + "UseFIPS": false } }, { @@ -1179,9 +976,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "sa-east-1", "UseDualStack": true, - "Region": "sa-east-1" + "UseFIPS": true } }, { @@ -1192,9 +989,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "sa-east-1", "UseDualStack": false, - "Region": "sa-east-1" + "UseFIPS": true } }, { @@ -1205,9 +1002,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "sa-east-1", "UseDualStack": true, - "Region": "sa-east-1" + "UseFIPS": false } }, { @@ -1218,61 +1015,9 @@ } }, "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "sa-east-1" - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://m2-fips.ap-southeast-1.api.aws" - } - }, - "params": { - "UseFIPS": true, - "UseDualStack": true, - "Region": "ap-southeast-1" - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://m2-fips.ap-southeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": true, + "Region": "sa-east-1", "UseDualStack": false, - "Region": "ap-southeast-1" - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://m2.ap-southeast-1.api.aws" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": true, - "Region": "ap-southeast-1" - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://m2.ap-southeast-1.amazonaws.com" - } - }, - "params": { - "UseFIPS": false, - "UseDualStack": false, - "Region": "ap-southeast-1" + "UseFIPS": false } }, { @@ -1283,9 +1028,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-southeast-2", "UseDualStack": true, - "Region": "ap-southeast-2" + "UseFIPS": true } }, { @@ -1296,9 +1041,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-southeast-2", "UseDualStack": false, - "Region": "ap-southeast-2" + "UseFIPS": true } }, { @@ -1309,9 +1054,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-southeast-2", "UseDualStack": true, - "Region": "ap-southeast-2" + "UseFIPS": false } }, { @@ -1322,9 +1067,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-southeast-2", "UseDualStack": false, - "Region": "ap-southeast-2" + "UseFIPS": false } }, { @@ -1335,9 +1080,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-east-1", "UseDualStack": true, - "Region": "us-east-1" + "UseFIPS": true } }, { @@ -1348,9 +1093,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-east-1", "UseDualStack": false, - "Region": "us-east-1" + "UseFIPS": true } }, { @@ -1361,9 +1106,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-east-1", "UseDualStack": true, - "Region": "us-east-1" + "UseFIPS": false } }, { @@ -1374,9 +1119,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-east-1", "UseDualStack": false, - "Region": "us-east-1" + "UseFIPS": false } }, { @@ -1387,9 +1132,9 @@ } }, "params": { - "UseFIPS": false, - "UseDualStack": false, "Region": "us-east-1", + "UseDualStack": false, + "UseFIPS": false, "Endpoint": "https://example.com" } }, @@ -1399,9 +1144,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - 
"UseFIPS": true, - "UseDualStack": false, "Region": "us-east-1", + "UseDualStack": false, + "UseFIPS": true, "Endpoint": "https://example.com" } }, @@ -1411,9 +1156,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, - "UseDualStack": true, "Region": "us-east-1", + "UseDualStack": true, + "UseFIPS": false, "Endpoint": "https://example.com" } } @@ -1780,7 +1525,7 @@ "s3:GetObject", "s3:ListBucket" ], - "smithy.api#documentation": "Creates a new application with given parameters. Requires an existing runtime environment and\n application definition file.
", + "smithy.api#documentation": "Creates a new application with given parameters. Requires an existing environment and\n application definition file.
", "smithy.api#http": { "method": "POST", "uri": "/applications", @@ -1831,12 +1576,6 @@ "smithy.api#documentation": "Unique, case-sensitive identifier the service generates to ensure the idempotency of the\n request to create an application. The service generates the clientToken when the API call\n is triggered. The token expires after one hour, so if you retry the API within this\n timeframe with the same clientToken, you will get the same response. The service also\n handles deleting the clientToken after it expires.
", "smithy.api#idempotencyToken": {} } - }, - "kmsKeyId": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "The identifier of a customer managed key.
" - } } } }, @@ -1984,7 +1723,7 @@ "elasticloadbalancing:CreateTargetGroup", "elasticloadbalancing:CreateListener" ], - "smithy.api#documentation": "Creates and starts a deployment to deploy an application into a runtime environment.
", + "smithy.api#documentation": "Creates and starts a deployment to deploy an application into an environment.
", "smithy.api#http": { "method": "POST", "uri": "/applications/{applicationId}/deployments", @@ -1999,7 +1738,7 @@ "environmentId": { "target": "com.amazonaws.m2#Identifier", "traits": { - "smithy.api#documentation": "The identifier of the runtime environment where you want to deploy this application.
", + "smithy.api#documentation": "The identifier of the environment where this application will be deployed.
", "smithy.api#required": {} } }, @@ -2103,59 +1842,59 @@ "name": { "target": "com.amazonaws.m2#EntityName", "traits": { - "smithy.api#documentation": "The name of the runtime environment. Must be unique within the account.
", + "smithy.api#documentation": "The unique identifier of the environment.
", "smithy.api#required": {} } }, "instanceType": { "target": "com.amazonaws.m2#String20", "traits": { - "smithy.api#documentation": "The type of instance for the runtime environment.
", + "smithy.api#documentation": "The type of instance for the environment.
", "smithy.api#required": {} } }, "description": { "target": "com.amazonaws.m2#EntityDescription", "traits": { - "smithy.api#documentation": "The description of the runtime environment.
" + "smithy.api#documentation": "The description of the environment.
" } }, "engineType": { "target": "com.amazonaws.m2#EngineType", "traits": { - "smithy.api#documentation": "The engine type for the runtime environment.
", + "smithy.api#documentation": "The engine type for the environment.
", "smithy.api#required": {} } }, "engineVersion": { "target": "com.amazonaws.m2#EngineVersion", "traits": { - "smithy.api#documentation": "The version of the engine type for the runtime environment.
" + "smithy.api#documentation": "The version of the engine type for the environment.
" } }, "subnetIds": { "target": "com.amazonaws.m2#String50List", "traits": { - "smithy.api#documentation": "The list of subnets associated with the VPC for this runtime environment.
" + "smithy.api#documentation": "The list of subnets associated with the VPC for this environment.
" } }, "securityGroupIds": { "target": "com.amazonaws.m2#String50List", "traits": { - "smithy.api#documentation": "The list of security groups for the VPC associated with this runtime environment.
" + "smithy.api#documentation": "The list of security groups for the VPC associated with this environment.
" } }, "storageConfigurations": { "target": "com.amazonaws.m2#StorageConfigurationList", "traits": { - "smithy.api#documentation": "Optional. The storage configurations for this runtime environment.
" + "smithy.api#documentation": "Optional. The storage configurations for this environment.
" } }, "publiclyAccessible": { "target": "com.amazonaws.m2#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "Specifies whether the runtime environment is publicly accessible.
" + "smithy.api#documentation": "Specifies whether the environment is publicly accessible.
" } }, "highAvailabilityConfig": { @@ -2167,13 +1906,13 @@ "tags": { "target": "com.amazonaws.m2#TagMap", "traits": { - "smithy.api#documentation": "The tags for the runtime environment.
" + "smithy.api#documentation": "The tags for the environment.
" } }, "preferredMaintenanceWindow": { "target": "com.amazonaws.m2#String50", "traits": { - "smithy.api#documentation": "Configures the maintenance window you want for the runtime environment. If you do not provide a\n value, a random system-generated value will be assigned.
" + "smithy.api#documentation": "Configures a desired maintenance window for the environment. If you do not provide a\n value, a random system-generated value will be assigned.
" } }, "clientToken": { @@ -2182,12 +1921,6 @@ "smithy.api#documentation": "Unique, case-sensitive identifier you provide to ensure the idempotency of the request\n to create an environment. The service generates the clientToken when the API call is\n triggered. The token expires after one hour, so if you retry the API within this timeframe\n with the same clientToken, you will get the same response. The service also handles\n deleting the clientToken after it expires.
", "smithy.api#idempotencyToken": {} } - }, - "kmsKeyId": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "The identifier of a customer managed key.
" - } } } }, @@ -2197,7 +1930,7 @@ "environmentId": { "target": "com.amazonaws.m2#Identifier", "traits": { - "smithy.api#documentation": "The unique identifier of the runtime environment.
", + "smithy.api#documentation": "The identifier of this environment.
", "smithy.api#required": {} } } @@ -2222,7 +1955,7 @@ "datasetOrg": { "target": "com.amazonaws.m2#DatasetOrgAttributes", "traits": { - "smithy.api#documentation": "The type of dataset. The only supported value is VSAM.
", + "smithy.api#documentation": "The type of dataset. Possible values include VSAM, IS, PS, GDG, PO, PS, UNKNOWN\n etc.
", "smithy.api#required": {} } }, @@ -2394,7 +2127,7 @@ "dataSetOrg": { "target": "com.amazonaws.m2#String20", "traits": { - "smithy.api#documentation": "The type of data set. The only supported value is VSAM.
" + "smithy.api#documentation": "The type of data set. Possible values include VSAM, IS, PS, GDG, PO, PS, or\n unknown.
" } }, "format": { @@ -2581,7 +2314,7 @@ ], "traits": { "aws.api#controlPlane": {}, - "smithy.api#documentation": "Deletes a specific application from the specific runtime environment where it was previously\n deployed. You cannot delete a runtime environment using DeleteEnvironment if any application has\n ever been deployed to it. This API removes the association of the application with the\n runtime environment so you can delete the environment smoothly.
", + "smithy.api#documentation": "Deletes a specific application from a specified environment where it has been previously\n deployed. You cannot delete an environment using DeleteEnvironment, if any application has\n ever been deployed to it. This API removes the association of the application with the\n environment so you can delete the environment smoothly.
", "smithy.api#http": { "method": "DELETE", "uri": "/applications/{applicationId}/environment/{environmentId}", @@ -2661,7 +2394,7 @@ "aws.iam#requiredActions": [ "elasticloadbalancing:DeleteLoadBalancer" ], - "smithy.api#documentation": "Deletes a specific runtime environment. The environment cannot contain deployed applications. If\n it does, you must delete those applications before you delete the environment.
", + "smithy.api#documentation": "Deletes a specific environment. The environment cannot contain deployed applications. If\n it does, you must delete those applications before you delete the environment.
", "smithy.api#http": { "method": "DELETE", "uri": "/environments/{environmentId}", @@ -2760,7 +2493,7 @@ "environmentId": { "target": "com.amazonaws.m2#Identifier", "traits": { - "smithy.api#documentation": "The unique identifier of the runtime environment.
", + "smithy.api#documentation": "The unique identifier of the environment.
", "smithy.api#required": {} } }, @@ -2947,10 +2680,6 @@ { "value": "Failed", "name": "FAILED" - }, - { - "value": "Updating", - "name": "UPDATING" } ] } @@ -2961,7 +2690,7 @@ "name": { "target": "com.amazonaws.m2#EntityName", "traits": { - "smithy.api#documentation": "The name of the runtime environment.
", + "smithy.api#documentation": "The name of the environment.
", "smithy.api#required": {} } }, @@ -2982,21 +2711,21 @@ "instanceType": { "target": "com.amazonaws.m2#String20", "traits": { - "smithy.api#documentation": "The instance type of the runtime environment.
", + "smithy.api#documentation": "The instance type of the environment.
", "smithy.api#required": {} } }, "status": { "target": "com.amazonaws.m2#EnvironmentLifecycle", "traits": { - "smithy.api#documentation": "The status of the runtime environment
", + "smithy.api#documentation": "The status of the environment
", "smithy.api#required": {} } }, "engineType": { "target": "com.amazonaws.m2#EngineType", "traits": { - "smithy.api#documentation": "The target platform for the runtime environment.
", + "smithy.api#documentation": "The target platform for the environment.
", "smithy.api#required": {} } }, @@ -3010,13 +2739,13 @@ "creationTime": { "target": "com.amazonaws.m2#Timestamp", "traits": { - "smithy.api#documentation": "The timestamp when the runtime environment was created.
", + "smithy.api#documentation": "The timestamp when the environment was created.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "Contains a subset of the possible runtime environment attributes. Used in the environment\n list.
" + "smithy.api#documentation": "Contains a subset of the possible environment attributes. Used in the environment\n list.
" } }, "com.amazonaws.m2#EnvironmentSummaryList": { @@ -3256,7 +2985,7 @@ "logGroups": { "target": "com.amazonaws.m2#LogGroupSummaries", "traits": { - "smithy.api#documentation": "The list of log summaries. Each log summary includes the log type as well as the log\n group identifier. These are CloudWatch logs. Amazon Web Services Mainframe Modernization pushes the application log to CloudWatch\n under the customer's account.
" + "smithy.api#documentation": "The list of log summaries. Each log summary includes the log type as well as the log\n group identifier. These are CloudWatch logs. The Amazon Web Services Mainframe Modernization application log is pushed to CloudWatch\n under the customer's account.
" } }, "creationTime": { @@ -3269,7 +2998,7 @@ "lastStartTime": { "target": "com.amazonaws.m2#Timestamp", "traits": { - "smithy.api#documentation": "The timestamp when you last started the application. Null until the application runs for the first time.
" + "smithy.api#documentation": "The timestamp when the application was last started. Null until the application has\n started running for the first time.
" } }, "tags": { @@ -3281,7 +3010,7 @@ "environmentId": { "target": "com.amazonaws.m2#Identifier", "traits": { - "smithy.api#documentation": "The identifier of the runtime environment where you want to deploy the application.
" + "smithy.api#documentation": "The identifier of the environment where the application will be deployed.
" } }, "targetGroupArns": { @@ -3293,7 +3022,7 @@ "listenerArns": { "target": "com.amazonaws.m2#ArnList", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) for the network load balancer listener created in your\n Amazon Web Services account. Amazon Web Services Mainframe Modernization creates this listener for you the first time you deploy an\n application.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) for the network load balancer listener created in your\n Amazon Web Services account. Amazon Web Services Mainframe Modernization creates this listener on your behalf the first time you deploy an\n application.
" } }, "listenerPorts": { @@ -3313,12 +3042,6 @@ "traits": { "smithy.api#documentation": "The reason for the reported status.
" } - }, - "kmsKeyId": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "The identifier of a customer managed key.
" - } } } }, @@ -3405,7 +3128,7 @@ "definitionContent": { "target": "com.amazonaws.m2#StringFree65000", "traits": { - "smithy.api#documentation": "The content of the application definition. This is a JSON object that contains the\n resource configuration and definitions that identify an application.
", + "smithy.api#documentation": "The content of the application definition. This is a JSON object that contains the\n resource configuration/definitions that identify an application.
", "smithy.api#required": {} } }, @@ -3636,7 +3359,7 @@ "dataSetOrg": { "target": "com.amazonaws.m2#DatasetDetailOrgAttributes", "traits": { - "smithy.api#documentation": "The type of data set. The only supported value is VSAM.
" + "smithy.api#documentation": "The type of data set. Possible values include VSAM, IS, PS, GDG, PO, PS, or\n unknown.
" } }, "recordLength": { @@ -3649,7 +3372,7 @@ "location": { "target": "com.amazonaws.m2#String2000", "traits": { - "smithy.api#documentation": "The location where the data set is stored.
" + "smithy.api#documentation": "The locaion where the data set is stored.
" } }, "blocksize": { @@ -3929,7 +3652,7 @@ "name": { "target": "com.amazonaws.m2#EntityName", "traits": { - "smithy.api#documentation": "The name of the runtime environment. Must be unique within the account.
", + "smithy.api#documentation": "The name of the runtime environment.
", "smithy.api#required": {} } }, @@ -4053,9 +3776,9 @@ } }, "preferredMaintenanceWindow": { - "target": "com.amazonaws.m2#String50", + "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "Configures the maintenance window you want for the runtime environment. If you do not provide a\n value, a random system-generated value will be assigned.
" + "smithy.api#documentation": "Configures a desired maintenance window for the environment. If you do not provide a\n value, a random system-generated value will be assigned.
" } }, "pendingMaintenance": { @@ -4063,12 +3786,6 @@ "traits": { "smithy.api#documentation": "Indicates the pending maintenance scheduled on this environment.
" } - }, - "kmsKeyId": { - "target": "smithy.api#String", - "traits": { - "smithy.api#documentation": "The identifier of a customer managed key.
" - } } } }, @@ -4257,7 +3974,7 @@ ], "traits": { "aws.api#controlPlane": {}, - "smithy.api#documentation": "Lists the applications associated with a specific Amazon Web Services account. You can provide the\n unique identifier of a specific runtime environment in a query parameter to see all applications\n associated with that environment.
", + "smithy.api#documentation": "Lists the applications associated with a specific Amazon Web Services account. You can provide the\n unique identifier of a specific environment in a query parameter to see all applications\n associated with that environment.
", "smithy.api#http": { "method": "GET", "uri": "/applications", @@ -4323,7 +4040,7 @@ "applications": { "target": "com.amazonaws.m2#ApplicationSummaryList", "traits": { - "smithy.api#documentation": "Returns a list of summary details for all the applications in a runtime environment.
", + "smithy.api#documentation": "Returns a list of summary details for all the applications in an environment.
", "smithy.api#required": {} } }, @@ -4365,7 +4082,7 @@ ], "traits": { "aws.api#controlPlane": {}, - "smithy.api#documentation": "Lists all the available batch job definitions based on the batch job resources uploaded\n during the application creation. You can use the batch job definitions in the list to start\n a batch job.
", + "smithy.api#documentation": "Lists all the available batch job definitions based on the batch job resources uploaded\n during the application creation. The listed batch job definitions can then be used to start\n a batch job.
", "smithy.api#http": { "method": "GET", "uri": "/applications/{applicationId}/batch-job-definitions", @@ -4692,7 +4409,7 @@ ], "traits": { "aws.api#controlPlane": {}, - "smithy.api#documentation": "Lists the data sets imported for a specific application. In Amazon Web Services Mainframe Modernization, data sets are\n associated with applications deployed on runtime environments. This is known as importing data\n sets. Currently, Amazon Web Services Mainframe Modernization can import data sets into catalogs using CreateDataSetImportTask.
", + "smithy.api#documentation": "Lists the data sets imported for a specific application. In Amazon Web Services Mainframe Modernization, data sets are\n associated with applications deployed on environments. This is known as importing data\n sets. Currently, Amazon Web Services Mainframe Modernization can import data sets into catalogs using CreateDataSetImportTask.
", "smithy.api#http": { "method": "GET", "uri": "/applications/{applicationId}/datasets", @@ -4754,7 +4471,7 @@ "dataSets": { "target": "com.amazonaws.m2#DataSetsSummaryList", "traits": { - "smithy.api#documentation": "The list of data sets, containing information including the creation time, the data set\n name, the data set organization, the data set format, and the last time the data set was\n referenced or updated.
", + "smithy.api#documentation": "The list of data sets, containing ionformation including the creating time, the data set\n name, the data set organization, the data set format, and the last time the data set was\n referenced or updated.
", "smithy.api#required": {} } }, @@ -5001,7 +4718,7 @@ "nextToken": { "target": "com.amazonaws.m2#NextToken", "traits": { - "smithy.api#documentation": "A pagination token to control the number of runtime environments displayed in the list.
", + "smithy.api#documentation": "A pagination token to control the number of environments displayed in the list.
", "smithy.api#httpQuery": "nextToken" } }, @@ -5009,21 +4726,21 @@ "target": "com.amazonaws.m2#MaxResults", "traits": { "smithy.api#default": null, - "smithy.api#documentation": "The maximum number of runtime environments to return.
", + "smithy.api#documentation": "The maximum number of environments to return.
", "smithy.api#httpQuery": "maxResults" } }, "names": { "target": "com.amazonaws.m2#EntityNameList", "traits": { - "smithy.api#documentation": "The names of the runtime environments. Must be unique within the account.
", + "smithy.api#documentation": "The name of the environment.
", "smithy.api#httpQuery": "names" } }, "engineType": { "target": "com.amazonaws.m2#EngineType", "traits": { - "smithy.api#documentation": "The engine type for the runtime environment.
", + "smithy.api#documentation": "The engine type for the environment.
", "smithy.api#httpQuery": "engineType" } } @@ -5038,14 +4755,14 @@ "environments": { "target": "com.amazonaws.m2#EnvironmentSummaryList", "traits": { - "smithy.api#documentation": "Returns a list of summary details for all the runtime environments in your account.
", + "smithy.api#documentation": "Returns a list of summary details for all the environments in your account.
", "smithy.api#required": {} } }, "nextToken": { "target": "com.amazonaws.m2#NextToken", "traits": { - "smithy.api#documentation": "A pagination token that's returned when the response doesn't contain all the\n runtime environments.
" + "smithy.api#documentation": "A pagination token that's returned when the response doesn't contain all the\n environments.
" } } }, @@ -5147,7 +4864,7 @@ } }, "traits": { - "smithy.api#documentation": "A subset of the attributes that describe a log group. In CloudWatch a log group is a group of log\n streams that share the same retention, monitoring, and access control settings.
" + "smithy.api#documentation": "A subset of the attributes about a log group. In CloudWatch a log group is a group of log\n streams that share the same retention, monitoring, and access control settings.
" } }, "com.amazonaws.m2#MaintenanceSchedule": { @@ -5191,7 +4908,7 @@ "schedule": { "target": "com.amazonaws.m2#MaintenanceSchedule", "traits": { - "smithy.api#documentation": "The maintenance schedule for the runtime engine version.
" + "smithy.api#documentation": "The maintenance schedule for the engine version.
" } }, "engineVersion": { @@ -5578,7 +5295,7 @@ } }, "traits": { - "smithy.api#documentation": "Defines the storage configuration for a runtime environment.
" + "smithy.api#documentation": "Defines the storage configuration for an environment.
" } }, "com.amazonaws.m2#StorageConfigurationList": { @@ -5962,7 +5679,7 @@ ], "traits": { "aws.api#controlPlane": {}, - "smithy.api#documentation": "Updates the configuration details for a specific runtime environment.
", + "smithy.api#documentation": "Updates the configuration details for a specific environment.
", "smithy.api#http": { "method": "PATCH", "uri": "/environments/{environmentId}", @@ -5985,32 +5702,32 @@ "target": "com.amazonaws.m2#CapacityValue", "traits": { "smithy.api#default": null, - "smithy.api#documentation": "The desired capacity for the runtime environment to update.
" + "smithy.api#documentation": "The desired capacity for the environment to update.
" } }, "instanceType": { "target": "com.amazonaws.m2#String20", "traits": { - "smithy.api#documentation": "The instance type for the runtime environment to update.
" + "smithy.api#documentation": "The instance type for the environment to update.
" } }, "engineVersion": { "target": "com.amazonaws.m2#EngineVersion", "traits": { - "smithy.api#documentation": "The version of the runtime engine for the runtime environment.
" + "smithy.api#documentation": "The version of the runtime engine for the environment.
" } }, "preferredMaintenanceWindow": { "target": "smithy.api#String", "traits": { - "smithy.api#documentation": "Configures the maintenance window you want for the runtime environment. If you do not provide a\n value, a random system-generated value will be assigned.
" + "smithy.api#documentation": "Configures a desired maintenance window for the environment. If you do not provide a\n value, a random system-generated value will be assigned.
" } }, "applyDuringMaintenanceWindow": { "target": "com.amazonaws.m2#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "Indicates whether to update the runtime environment during the maintenance window. The default\n is false. Currently, Amazon Web Services Mainframe Modernization accepts the engineVersion
parameter only if\n applyDuringMaintenanceWindow
is true. If any parameter other than\n engineVersion
is provided in UpdateEnvironmentRequest
, it will\n fail if applyDuringMaintenanceWindow
is set to true.
Indicates whether to update the environment during the maintenance window. The default\n is false. Currently, Amazon Web Services Mainframe Modernization accepts the engineVersion
parameter only if\n applyDuringMaintenanceWindow
is true. If any parameter other than\n engineVersion
is provided in UpdateEnvironmentRequest
, it will\n fail if applyDuringMaintenanceWindow
is set to true.
Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -109,7 +109,7 @@ } }, "traits": { - "smithy.api#documentation": "You are not authorized to perform this operation. Check your IAM\n policies, and ensure that you are using the correct access keys.
", + "smithy.api#documentation": "You are not authorized to perform this operation. Check your IAM policies, and ensure\n that you are using the correct access keys.
", "smithy.api#error": "client", "smithy.api#httpError": 403 } @@ -180,7 +180,7 @@ "directoryId": { "target": "com.amazonaws.nimble#DirectoryId", "traits": { - "smithy.api#documentation": "The directory ID of the Directory Service for Microsoft Active Directory to access using this studio\n component.
" + "smithy.api#documentation": "The directory ID of the Directory Service for Microsoft Active Directory to access\n using this studio component.
" } }, "organizationalUnitDistinguishedName": { @@ -191,7 +191,7 @@ } }, "traits": { - "smithy.api#documentation": "The configuration for a Directory Service for Microsoft Active Directory studio resource.
" + "smithy.api#documentation": "The configuration for a Microsoft Active Directory (Microsoft AD) studio\n resource.
" } }, "com.amazonaws.nimble#ActiveDirectoryDnsIpAddress": { @@ -219,23 +219,6 @@ } } }, - "com.amazonaws.nimble#AutomaticTerminationMode": { - "type": "enum", - "members": { - "DEACTIVATED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DEACTIVATED" - } - }, - "ACTIVATED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ACTIVATED" - } - } - } - }, "com.amazonaws.nimble#ClientToken": { "type": "string", "traits": { @@ -354,7 +337,7 @@ "target": "com.amazonaws.nimble#ClientToken", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -414,7 +397,7 @@ "tags": { "target": "com.amazonaws.nimble#Tags", "traits": { - "smithy.api#documentation": "A collection of labels, in the form of key-value pairs, that apply to this\n resource.
" + "smithy.api#documentation": "A collection of labels, in the form of key:value pairs, that apply to this\n resource.
" } } }, @@ -500,7 +483,7 @@ "target": "com.amazonaws.nimble#ClientToken", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -538,7 +521,7 @@ "target": "com.amazonaws.nimble#Tags", "traits": { "aws.cloudformation#cfnMutability": "create-and-read", - "smithy.api#documentation": "A collection of labels, in the form of key-value pairs, that apply to this\n resource.
" + "smithy.api#documentation": "A collection of labels, in the form of key:value pairs, that apply to this\n resource.
" } } }, @@ -609,7 +592,7 @@ "nimble:GetLaunchProfile", "nimble:ListEulaAcceptances" ], - "smithy.api#documentation": "Creates a streaming session in a studio.
\nAfter invoking this operation, you must poll GetStreamingSession until the streaming\n session is in the READY
state.
Creates a streaming session in a studio.
\nAfter invoking this operation, you must poll GetStreamingSession until the streaming\n session is in state READY.
", "smithy.api#http": { "method": "POST", "uri": "/2020-08-01/studios/{studioId}/streaming-sessions", @@ -623,7 +606,7 @@ "clientToken": { "target": "com.amazonaws.nimble#ClientToken", "traits": { - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -637,8 +620,7 @@ "launchProfileId": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The ID of the launch profile used to control access from the streaming session.
", - "smithy.api#required": {} + "smithy.api#documentation": "The launch profile ID.
" } }, "ownedBy": { @@ -664,7 +646,7 @@ "tags": { "target": "com.amazonaws.nimble#Tags", "traits": { - "smithy.api#documentation": "A collection of labels, in the form of key-value pairs, that apply to this\n resource.
" + "smithy.api#documentation": "A collection of labels, in the form of key:value pairs, that apply to this\n resource.
" } } }, @@ -727,7 +709,7 @@ "aws.iam#conditionKeys": [ "nimble:requesterPrincipalId" ], - "smithy.api#documentation": "Creates a streaming session stream for a streaming session.
\nAfter invoking this API, invoke GetStreamingSessionStream with the returned streamId\n to poll the resource until it is in the READY
state.
Creates a streaming session stream for a streaming session.
\nAfter invoking this API, invoke GetStreamingSessionStream with the returned streamId\n to poll the resource until it is in state READY.
", "smithy.api#http": { "method": "POST", "uri": "/2020-08-01/studios/{studioId}/streaming-sessions/{sessionId}/streams", @@ -742,7 +724,7 @@ "clientToken": { "target": "com.amazonaws.nimble#ClientToken", "traits": { - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -750,7 +732,7 @@ "expirationInSeconds": { "target": "com.amazonaws.nimble#StreamingSessionStreamExpirationInSeconds", "traits": { - "smithy.api#default": 120, + "smithy.api#default": 0, "smithy.api#documentation": "The expiration time in seconds.
" } }, @@ -835,7 +817,7 @@ "sso:CreateManagedApplicationInstance", "iam:PassRole" ], - "smithy.api#documentation": "Create a new studio.
\nWhen creating a studio, two IAM roles must be provided: the admin role\n and the user role. These roles are assumed by your users when they log in to the Nimble Studio portal.
\nThe user role must have the AmazonNimbleStudio-StudioUser
managed policy\n attached for the portal to function properly.
The admin role must have the AmazonNimbleStudio-StudioAdmin
managed\n policy attached for the portal to function properly.
You may optionally specify a KMS key in the\n StudioEncryptionConfiguration
.
In Nimble Studio, resource names, descriptions, initialization scripts, and other\n data you provide are always encrypted at rest using an KMS key. By default, this key is\n owned by Amazon Web Services and managed on your behalf. You may provide your own KMS key\n when calling CreateStudio
to encrypt this data using a key you own and\n manage.
When providing an KMS key during studio creation, Nimble Studio creates KMS\n grants in your account to provide your studio user and admin roles access to these KMS\n keys.
\nIf you delete this grant, the studio will no longer be accessible to your portal\n users.
\nIf you delete the studio KMS key, your studio will no longer be accessible.
", + "smithy.api#documentation": "Create a new Studio.
\nWhen creating a Studio, two IAM roles must be provided: the admin role and the user\n Role. These roles are assumed by your users when they log in to the Nimble Studio\n portal.
\nThe user role must have the AmazonNimbleStudio-StudioUser managed policy attached for\n the portal to function properly.
\nThe Admin Role must have the AmazonNimbleStudio-StudioAdmin managed policy attached\n for the portal to function properly.
\nYou may optionally specify a KMS key in the StudioEncryptionConfiguration.
\nIn Nimble Studio, resource names, descriptions, initialization scripts, and other\n data you provide are always encrypted at rest using an KMS key. By default, this key is\n owned by Amazon Web Services and managed on your behalf. You may provide your own KMS\n key when calling CreateStudio to encrypt this data using a key you own and\n manage.
\nWhen providing an KMS key during studio creation, Nimble Studio creates KMS\n grants in your account to provide your studio user and admin roles access to these KMS\n keys.
\nIf you delete this grant, the studio will no longer be accessible to your portal\n users.
\nIf you delete the studio KMS key, your studio will no longer be accessible.
", "smithy.api#http": { "method": "POST", "uri": "/2020-08-01/studios", @@ -901,7 +883,7 @@ "target": "com.amazonaws.nimble#ClientToken", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -963,7 +945,7 @@ "target": "com.amazonaws.nimble#Tags", "traits": { "aws.cloudformation#cfnMutability": "create-and-read", - "smithy.api#documentation": "A collection of labels, in the form of key-value pairs, that apply to this\n resource.
" + "smithy.api#documentation": "A collection of labels, in the form of key:value pairs, that apply to this\n resource.
" } }, "type": { @@ -977,14 +959,14 @@ "target": "com.amazonaws.nimble#RoleArn", "traits": { "aws.cloudformation#cfnMutability": "full", - "smithy.api#documentation": "An IAM role attached to Studio Component when the system initialization\n script runs which give the studio component access to Amazon Web Services resources when\n the system initialization script runs.
" + "smithy.api#documentation": "An IAM role attached to Studio Component when the system initialization script runs which give the studio component access to AWS resources when the system initialization script runs.
" } }, "runtimeRoleArn": { "target": "com.amazonaws.nimble#RoleArn", "traits": { "aws.cloudformation#cfnMutability": "full", - "smithy.api#documentation": "An IAM role attached to a Studio Component that gives the studio\n component access to Amazon Web Services resources at anytime while the instance is\n running.
" + "smithy.api#documentation": "An IAM role attached to a Studio Component that gives the studio component access to AWS resources at anytime while the instance is running.
" } } }, @@ -1017,7 +999,7 @@ "adminRoleArn": { "target": "com.amazonaws.nimble#RoleArn", "traits": { - "smithy.api#documentation": "The IAM role that studio admins will assume when logging in to the\n Nimble Studio portal.
", + "smithy.api#documentation": "The IAM role that Studio Admins will assume when logging in to the Nimble Studio\n portal.
", "smithy.api#required": {} } }, @@ -1025,7 +1007,7 @@ "target": "com.amazonaws.nimble#ClientToken", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -1055,14 +1037,14 @@ "tags": { "target": "com.amazonaws.nimble#Tags", "traits": { - "smithy.api#documentation": "A collection of labels, in the form of key-value pairs, that apply to this\n resource.
" + "smithy.api#documentation": "A collection of labels, in the form of key:value pairs, that apply to this\n resource.
" } }, "userRoleArn": { "target": "com.amazonaws.nimble#RoleArn", "traits": { "aws.cloudformation#cfnMutability": "full", - "smithy.api#documentation": "The IAM role that studio users will assume when logging in to the\n Nimble Studio portal.
", + "smithy.api#documentation": "The IAM role that Studio Users will assume when logging in to the Nimble Studio\n portal.
", "smithy.api#required": {} } } @@ -1175,7 +1157,7 @@ "clientToken": { "target": "com.amazonaws.nimble#ClientToken", "traits": { - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -1183,7 +1165,7 @@ "launchProfileId": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The ID of the launch profile used to control access from the streaming session.
", + "smithy.api#documentation": "The Launch Profile ID.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1223,7 +1205,7 @@ "target": "com.amazonaws.nimble#ClientToken", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -1231,7 +1213,7 @@ "launchProfileId": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The ID of the launch profile used to control access from the streaming session.
", + "smithy.api#documentation": "The Launch Profile ID.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -1318,7 +1300,7 @@ "target": "com.amazonaws.nimble#ClientToken", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -1397,7 +1379,7 @@ "aws.iam#requiredActions": [ "ec2:DeleteNetworkInterface" ], - "smithy.api#documentation": "Deletes streaming session resource.
\nAfter invoking this operation, use GetStreamingSession to poll the resource until it\n transitions to a DELETED
state.
A streaming session will count against your streaming session quota until it is marked\n DELETED
.
Deletes streaming session resource.
\nAfter invoking this operation, use GetStreamingSession to poll the resource until it\n transitions to a DELETED state.
\nA streaming session will count against your streaming session quota until it is marked\n DELETED.
", "smithy.api#http": { "method": "DELETE", "uri": "/2020-08-01/studios/{studioId}/streaming-sessions/{sessionId}", @@ -1412,7 +1394,7 @@ "clientToken": { "target": "com.amazonaws.nimble#ClientToken", "traits": { - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -1546,7 +1528,7 @@ "target": "com.amazonaws.nimble#ClientToken", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -1634,7 +1616,7 @@ "clientToken": { "target": "com.amazonaws.nimble#ClientToken", "traits": { - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -1673,7 +1655,7 @@ "clientToken": { "target": "com.amazonaws.nimble#ClientToken", "traits": { - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -1742,7 +1724,7 @@ "createdAt": { "target": "com.amazonaws.nimble#Timestamp", "traits": { - "smithy.api#documentation": "The ISO timestamp in seconds for when the resource was created.
" + "smithy.api#documentation": "The Unix epoch timestamp in seconds for when the resource was created.
" } }, "eulaId": { @@ -1760,7 +1742,7 @@ "updatedAt": { "target": "com.amazonaws.nimble#Timestamp", "traits": { - "smithy.api#documentation": "The ISO timestamp in seconds for when the resource was updated.
" + "smithy.api#documentation": "The Unix epoch timestamp in seconds for when the resource was updated.
" } } }, @@ -1774,7 +1756,7 @@ "acceptedAt": { "target": "com.amazonaws.nimble#Timestamp", "traits": { - "smithy.api#documentation": "The ISO timestamp in seconds for when the EULA was accepted.
" + "smithy.api#documentation": "The Unix epoch timestamp in seconds for when the EULA was accepted.
" } }, "acceptedBy": { @@ -1923,7 +1905,7 @@ "target": "com.amazonaws.nimble#String" }, "traits": { - "smithy.api#documentation": "ExceptionContext is a set of key-value pairs that provide you with more information\n about the error that occurred. For example, when the service returns a 404\n ResourceNotFound error, ExceptionContext will contain the key `resourceId` with the\n value of resource that was not found.
\n\n ExceptionContext
allows scripts and other programmatic clients to provide\n better error handling.
ExceptionContext is a set of key-value pairs that provide you with more information\n about the error that occurred. For example, when the service returns a 404\n ResourceNotFound error, ExceptionContext will contain the key `resourceId` with the\n value of resource that was not found.
\nExceptionContext enables scripts and other programmatic clients to provide for better\n error handling.
" } }, "com.amazonaws.nimble#GetEula": { @@ -1959,7 +1941,7 @@ ], "traits": { "aws.iam#actionPermissionDescription": "Grants permission to get a EULA", - "smithy.api#documentation": "Get EULA.
", + "smithy.api#documentation": "Get Eula.
", "smithy.api#http": { "method": "GET", "uri": "/2020-08-01/eulas/{eulaId}", @@ -2154,7 +2136,7 @@ "launchProfileId": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The ID of the launch profile used to control access from the streaming session.
", + "smithy.api#documentation": "The Launch Profile ID.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -2251,7 +2233,7 @@ "launchProfileId": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The ID of the launch profile used to control access from the streaming session.
", + "smithy.api#documentation": "The Launch Profile ID.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -2275,7 +2257,7 @@ "platform": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The platform where this Launch Profile will be used, either Windows or Linux.
", + "smithy.api#documentation": "The platform where this Launch Profile will be used, either WINDOWS or LINUX.
", "smithy.api#httpQuery": "platform", "smithy.api#required": {} } @@ -2355,7 +2337,7 @@ "launchProfileId": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The ID of the launch profile used to control access from the streaming session.
", + "smithy.api#documentation": "The Launch Profile ID.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -2401,7 +2383,7 @@ "launchProfileId": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The ID of the launch profile used to control access from the streaming session.
", + "smithy.api#documentation": "The Launch Profile ID.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -2715,86 +2697,6 @@ } } }, - "com.amazonaws.nimble#GetStreamingSessionBackup": { - "type": "operation", - "input": { - "target": "com.amazonaws.nimble#GetStreamingSessionBackupRequest" - }, - "output": { - "target": "com.amazonaws.nimble#GetStreamingSessionBackupResponse" - }, - "errors": [ - { - "target": "com.amazonaws.nimble#AccessDeniedException" - }, - { - "target": "com.amazonaws.nimble#ConflictException" - }, - { - "target": "com.amazonaws.nimble#InternalServerErrorException" - }, - { - "target": "com.amazonaws.nimble#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.nimble#ThrottlingException" - }, - { - "target": "com.amazonaws.nimble#ValidationException" - } - ], - "traits": { - "aws.iam#actionPermissionDescription": "Grants permission to get a streaming session backup", - "aws.iam#conditionKeys": [ - "nimble:requesterPrincipalId" - ], - "smithy.api#documentation": "Gets StreamingSessionBackup
resource.
Invoke this operation to poll for a streaming session backup while stopping a\n streaming session.
", - "smithy.api#http": { - "method": "GET", - "uri": "/2020-08-01/studios/{studioId}/streaming-session-backups/{backupId}", - "code": 200 - }, - "smithy.api#readonly": {} - } - }, - "com.amazonaws.nimble#GetStreamingSessionBackupRequest": { - "type": "structure", - "members": { - "backupId": { - "target": "com.amazonaws.nimble#String", - "traits": { - "smithy.api#documentation": "The ID of the backup.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - }, - "studioId": { - "target": "com.amazonaws.nimble#String", - "traits": { - "smithy.api#documentation": "The studio ID.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.nimble#GetStreamingSessionBackupResponse": { - "type": "structure", - "members": { - "streamingSessionBackup": { - "target": "com.amazonaws.nimble#StreamingSessionBackup", - "traits": { - "smithy.api#documentation": "Information about the streaming session backup.
" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, "com.amazonaws.nimble#GetStreamingSessionRequest": { "type": "structure", "members": { @@ -2869,7 +2771,7 @@ "aws.iam#conditionKeys": [ "nimble:requesterPrincipalId" ], - "smithy.api#documentation": "Gets a StreamingSessionStream for a streaming session.
\nInvoke this operation to poll the resource after invoking\n CreateStreamingSessionStream
.
After the StreamingSessionStream
changes to the READY
state,\n the url property will contain a stream to be used with the DCV streaming client.
Gets a StreamingSessionStream for a streaming session.
\nInvoke this operation to poll the resource after invoking\n CreateStreamingSessionStream.
\nAfter the StreamingSessionStream changes to the state READY, the url property will\n contain a stream to be used with the DCV streaming client.
", "smithy.api#http": { "method": "GET", "uri": "/2020-08-01/studios/{studioId}/streaming-sessions/{sessionId}/streams/{streamId}", @@ -2986,7 +2888,7 @@ ], "traits": { "aws.iam#actionPermissionDescription": "Grants permission to get a studio", - "smithy.api#documentation": "Get a studio resource.
", + "smithy.api#documentation": "Get a Studio resource.
", "smithy.api#http": { "method": "GET", "uri": "/2020-08-01/studios/{studioId}", @@ -3350,14 +3252,14 @@ "target": "com.amazonaws.nimble#String", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "The Amazon Resource Name (ARN) that is assigned to a studio resource and uniquely\n identifies it. ARNs are unique across all Regions.
" + "smithy.api#documentation": "The ARN of the resource.
" } }, "createdAt": { "target": "com.amazonaws.nimble#Timestamp", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "The ISO timestamp in seconds for when the resource was created.
" + "smithy.api#documentation": "The Unix epoch timestamp in seconds for when the resource was created.
" } }, "createdBy": { @@ -3384,7 +3286,7 @@ "launchProfileId": { "target": "com.amazonaws.nimble#LaunchProfileId", "traits": { - "smithy.api#documentation": "The ID of the launch profile used to control access from the streaming session.
" + "smithy.api#documentation": "The launch profile ID.
" } }, "launchProfileProtocolVersions": { @@ -3440,14 +3342,14 @@ "target": "com.amazonaws.nimble#Tags", "traits": { "aws.cloudformation#cfnMutability": "create-and-read", - "smithy.api#documentation": "A collection of labels, in the form of key-value pairs, that apply to this\n resource.
" + "smithy.api#documentation": "A collection of labels, in the form of key:value pairs, that apply to this\n resource.
" } }, "updatedAt": { "target": "com.amazonaws.nimble#Timestamp", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "The ISO timestamp in seconds for when the resource was updated.
" + "smithy.api#documentation": "The Unix epoch timestamp in seconds for when the resource was updated.
" } }, "updatedBy": { @@ -3466,7 +3368,7 @@ } }, "traits": { - "smithy.api#documentation": "A launch profile controls your artist workforce’s access to studio components, like\n compute farms, shared file systems, managed file systems, and license server\n configurations, as well as instance types and Amazon Machine Images (AMIs).
\nStudio administrators create launch profiles in the Nimble Studio console.\n Artists can use their launch profiles to launch an instance from the Nimble Studio\n portal. Each user’s launch profile defines how they can launch a streaming session. By\n default, studio admins can use all launch profiles.
" + "smithy.api#documentation": "A launch profile controls your artist workforce’s access to studio components, like\n compute farms, shared file systems, managed file systems, and license server\n configurations, as well as instance types and Amazon Machine Images (AMIs).
\n\nStudio administrators create launch profiles in the Nimble Studio console.\n Artists can use their launch profiles to launch an instance from the Nimble Studio\n portal. Each user’s launch profile defines how they can launch a streaming session. By\n default, studio admins can use all launch profiles.
" } }, "com.amazonaws.nimble#LaunchProfileDescription": { @@ -3496,7 +3398,7 @@ "activeDirectory": { "target": "com.amazonaws.nimble#LaunchProfileInitializationActiveDirectory", "traits": { - "smithy.api#documentation": "A LaunchProfileInitializationActiveDirectory
resource.
A LaunchProfileInitializationActiveDirectory resource.
" } }, "ec2SecurityGroupIds": { @@ -3508,7 +3410,7 @@ "launchProfileId": { "target": "com.amazonaws.nimble#LaunchProfileId", "traits": { - "smithy.api#documentation": "The ID of the launch profile used to control access from the streaming session.
" + "smithy.api#documentation": "The launch profile ID.
" } }, "launchProfileProtocolVersion": { @@ -3532,7 +3434,7 @@ "platform": { "target": "com.amazonaws.nimble#LaunchProfilePlatform", "traits": { - "smithy.api#documentation": "The platform of the launch platform, either Windows or Linux.
" + "smithy.api#documentation": "The platform of the launch platform, either WINDOWS or LINUX.
" } }, "systemInitializationScripts": { @@ -3549,7 +3451,7 @@ } }, "traits": { - "smithy.api#documentation": "A launch profile initialization contains information required for a workstation or\n server to connect to a launch profile.
\nThis includes scripts, endpoints, security groups, subnets, and other\n configuration.
" + "smithy.api#documentation": "A Launch Profile Initialization contains information required for a workstation or\n server to connect to a launch profile.
\nThis includes scripts, endpoints, security groups, subnets, and other\n configuration.
" } }, "com.amazonaws.nimble#LaunchProfileInitializationActiveDirectory": { @@ -3564,7 +3466,7 @@ "directoryId": { "target": "com.amazonaws.nimble#DirectoryId", "traits": { - "smithy.api#documentation": "The directory ID of the Directory Service for Microsoft Active Directory to access using this launch\n profile.
" + "smithy.api#documentation": "The directory ID of the Directory Service for Microsoft Active Directory to access\n using this launch profile.
" } }, "directoryName": { @@ -3599,7 +3501,7 @@ } }, "traits": { - "smithy.api#documentation": "The launch profile initialization Active Directory contains information required for\n the launch profile to connect to the Active Directory.
" + "smithy.api#documentation": "The Launch Profile Initialization Active Directory contains information required for\n the launch profile to connect to the Active Directory.
" } }, "com.amazonaws.nimble#LaunchProfileInitializationScript": { @@ -3626,18 +3528,18 @@ "secureInitializationRoleArn": { "target": "com.amazonaws.nimble#RoleArn", "traits": { - "smithy.api#documentation": "An IAM role attached to Studio Component when the system initialization\n script runs which give the studio component access to Amazon Web Services resources when\n the system initialization script runs.
" + "smithy.api#documentation": "An IAM role attached to Studio Component when the system initialization script runs which give the studio component access to AWS resources when the system initialization script runs.
" } }, "runtimeRoleArn": { "target": "com.amazonaws.nimble#RoleArn", "traits": { - "smithy.api#documentation": "An IAM role attached to a Studio Component that gives the studio\n component access to Amazon Web Services resources at anytime while the instance is\n running.
" + "smithy.api#documentation": "An IAM role attached to a Studio Component that gives the studio component access to AWS resources at anytime while the instance is running.
" } } }, "traits": { - "smithy.api#documentation": "The launch profile initialization script is used when start streaming session\n runs.
" + "smithy.api#documentation": "The Launch Profile Initialization Script is used when start streaming session\n runs.
" } }, "com.amazonaws.nimble#LaunchProfileInitializationScriptList": { @@ -3681,7 +3583,7 @@ } }, "traits": { - "smithy.api#documentation": "Studio admins can use launch profile membership to delegate launch profile access to\n studio users in the Nimble Studio portal without writing or maintaining complex\n IAM policies. A launch profile member is a user association from your\n studio identity source who is granted permissions to a launch profile.
\nA launch profile member (type USER) provides the following permissions to that launch\n profile:
\nGetLaunchProfile
\nGetLaunchProfileInitialization
\nGetLaunchProfileMembers
\nGetLaunchProfileMember
\nCreateStreamingSession
\nGetLaunchProfileDetails
\nLaunch profile membership enables your studio admins to delegate launch profile access\n to other studio users in the Nimble Studio portal without needing to write or\n maintain complex IAM policies. A launch profile member is a user association from your\n studio identity source who is granted permissions to a launch profile.
\nA launch profile member (type USER) provides the following permissions to that launch\n profile:
\nGetLaunchProfile
\nGetLaunchProfileInitialization
\nGetLaunchProfileMembers
\nGetLaunchProfileMember
\nCreateStreamingSession
\nGetLaunchProfileDetails
\nList EULA acceptances.
", + "smithy.api#documentation": "List Eula Acceptances.
", "smithy.api#http": { "method": "GET", "uri": "/2020-08-01/studios/{studioId}/eula-acceptances", @@ -4221,7 +4049,7 @@ "nextToken": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The token for the next set of results, or null if there are no more results.
", + "smithy.api#documentation": "The token to request the next page of results.
", "smithy.api#httpQuery": "nextToken" } }, @@ -4291,7 +4119,7 @@ ], "traits": { "aws.iam#actionPermissionDescription": "Grants permission to list EULAs", - "smithy.api#documentation": "List EULAs.
", + "smithy.api#documentation": "List Eulas.
", "smithy.api#http": { "method": "GET", "uri": "/2020-08-01/eulas", @@ -4318,7 +4146,7 @@ "nextToken": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The token for the next set of results, or null if there are no more results.
", + "smithy.api#documentation": "The token to request the next page of results.
", "smithy.api#httpQuery": "nextToken" } } @@ -4401,7 +4229,7 @@ "launchProfileId": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The ID of the launch profile used to control access from the streaming session.
", + "smithy.api#documentation": "The Launch Profile ID.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -4409,7 +4237,7 @@ "maxResults": { "target": "com.amazonaws.nimble#MaxResults", "traits": { - "smithy.api#default": 100, + "smithy.api#default": 0, "smithy.api#documentation": "The max number of results to return in the response.
", "smithy.api#httpQuery": "maxResults" } @@ -4417,7 +4245,7 @@ "nextToken": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The token for the next set of results, or null if there are no more results.
", + "smithy.api#documentation": "The token to request the next page of results.
", "smithy.api#httpQuery": "nextToken" } }, @@ -4512,7 +4340,7 @@ "maxResults": { "target": "com.amazonaws.nimble#MaxResults", "traits": { - "smithy.api#default": 100, + "smithy.api#default": 0, "smithy.api#documentation": "The max number of results to return in the response.
", "smithy.api#httpQuery": "maxResults" } @@ -4520,7 +4348,7 @@ "nextToken": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The token for the next set of results, or null if there are no more results.
", + "smithy.api#documentation": "The token to request the next page of results.
", "smithy.api#httpQuery": "nextToken" } }, @@ -4624,7 +4452,7 @@ "nextToken": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The token for the next set of results, or null if there are no more results.
", + "smithy.api#documentation": "The token to request the next page of results.
", "smithy.api#httpQuery": "nextToken" } }, @@ -4668,104 +4496,6 @@ "smithy.api#output": {} } }, - "com.amazonaws.nimble#ListStreamingSessionBackups": { - "type": "operation", - "input": { - "target": "com.amazonaws.nimble#ListStreamingSessionBackupsRequest" - }, - "output": { - "target": "com.amazonaws.nimble#ListStreamingSessionBackupsResponse" - }, - "errors": [ - { - "target": "com.amazonaws.nimble#AccessDeniedException" - }, - { - "target": "com.amazonaws.nimble#ConflictException" - }, - { - "target": "com.amazonaws.nimble#InternalServerErrorException" - }, - { - "target": "com.amazonaws.nimble#ResourceNotFoundException" - }, - { - "target": "com.amazonaws.nimble#ThrottlingException" - }, - { - "target": "com.amazonaws.nimble#ValidationException" - } - ], - "traits": { - "aws.iam#actionPermissionDescription": "Grants permission to list streaming session backups", - "aws.iam#conditionKeys": [ - "nimble:ownedBy", - "nimble:requesterPrincipalId" - ], - "smithy.api#documentation": "Lists the backups of a streaming session in a studio.
", - "smithy.api#http": { - "method": "GET", - "uri": "/2020-08-01/studios/{studioId}/streaming-session-backups", - "code": 200 - }, - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "items": "streamingSessionBackups" - }, - "smithy.api#readonly": {} - } - }, - "com.amazonaws.nimble#ListStreamingSessionBackupsRequest": { - "type": "structure", - "members": { - "nextToken": { - "target": "com.amazonaws.nimble#String", - "traits": { - "smithy.api#documentation": "The token for the next set of results, or null if there are no more results.
", - "smithy.api#httpQuery": "nextToken" - } - }, - "ownedBy": { - "target": "com.amazonaws.nimble#String", - "traits": { - "smithy.api#documentation": "The user ID of the user that owns the streaming session.
", - "smithy.api#httpQuery": "ownedBy" - } - }, - "studioId": { - "target": "com.amazonaws.nimble#String", - "traits": { - "smithy.api#documentation": "The studio ID.
", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#input": {} - } - }, - "com.amazonaws.nimble#ListStreamingSessionBackupsResponse": { - "type": "structure", - "members": { - "nextToken": { - "target": "com.amazonaws.nimble#String", - "traits": { - "smithy.api#documentation": "The token for the next set of results, or null if there are no more results.
" - } - }, - "streamingSessionBackups": { - "target": "com.amazonaws.nimble#StreamingSessionBackupList", - "traits": { - "smithy.api#documentation": "Information about the streaming session backups.
" - } - } - }, - "traits": { - "smithy.api#output": {} - } - }, "com.amazonaws.nimble#ListStreamingSessions": { "type": "operation", "input": { @@ -4831,7 +4561,7 @@ "nextToken": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The token for the next set of results, or null if there are no more results.
", + "smithy.api#documentation": "The token to request the next page of results.
", "smithy.api#httpQuery": "nextToken" } }, @@ -4915,7 +4645,7 @@ ], "traits": { "aws.iam#actionPermissionDescription": "Grants permission to list studio components", - "smithy.api#documentation": "Lists the StudioComponents
in a studio.
Lists the StudioComponents in a studio.
", "smithy.api#http": { "method": "GET", "uri": "/2020-08-01/studios/{studioId}/studio-components", @@ -4936,7 +4666,7 @@ "maxResults": { "target": "com.amazonaws.nimble#MaxResults", "traits": { - "smithy.api#default": 100, + "smithy.api#default": 0, "smithy.api#documentation": "The max number of results to return in the response.
", "smithy.api#httpQuery": "maxResults" } @@ -4944,7 +4674,7 @@ "nextToken": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The token for the next set of results, or null if there are no more results.
", + "smithy.api#documentation": "The token to request the next page of results.
", "smithy.api#httpQuery": "nextToken" } }, @@ -5049,7 +4779,7 @@ "maxResults": { "target": "com.amazonaws.nimble#MaxResults", "traits": { - "smithy.api#default": 100, + "smithy.api#default": 0, "smithy.api#documentation": "The max number of results to return in the response.
", "smithy.api#httpQuery": "maxResults" } @@ -5057,7 +4787,7 @@ "nextToken": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The token for the next set of results, or null if there are no more results.
", + "smithy.api#documentation": "The token to request the next page of results.
", "smithy.api#httpQuery": "nextToken" } }, @@ -5127,7 +4857,7 @@ ], "traits": { "aws.iam#actionPermissionDescription": "Grants permission to list studios", - "smithy.api#documentation": "List studios in your Amazon Web Services accounts in the requested Amazon Web Services Region.
", + "smithy.api#documentation": "List studios in your Amazon Web Services account in the requested Amazon Web Services\n Region.
", "smithy.api#http": { "method": "GET", "uri": "/2020-08-01/studios", @@ -5147,7 +4877,7 @@ "nextToken": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The token for the next set of results, or null if there are no more results.
", + "smithy.api#documentation": "The token to request the next page of results.
", "smithy.api#httpQuery": "nextToken" } } @@ -5273,7 +5003,7 @@ "tags": { "target": "com.amazonaws.nimble#Tags", "traits": { - "smithy.api#documentation": "A collection of labels, in the form of key-value pairs, that apply to this\n resource.
" + "smithy.api#documentation": "A collection of labels, in the form of key:value pairs, that apply to this\n resource.
" } } }, @@ -5284,7 +5014,7 @@ "com.amazonaws.nimble#MaxResults": { "type": "integer", "traits": { - "smithy.api#default": 100, + "smithy.api#default": 0, "smithy.api#range": { "min": 1, "max": 100 @@ -5409,7 +5139,7 @@ "clientToken": { "target": "com.amazonaws.nimble#ClientToken", "traits": { - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -5424,7 +5154,7 @@ "launchProfileId": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The ID of the launch profile used to control access from the streaming session.
", + "smithy.api#documentation": "The Launch Profile ID.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -5506,7 +5236,7 @@ "clientToken": { "target": "com.amazonaws.nimble#ClientToken", "traits": { - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -5620,7 +5350,7 @@ } }, "traits": { - "smithy.api#documentation": "A parameter for a studio component script, in the form of a key-value pair.
" + "smithy.api#documentation": "A parameter for a studio component script, in the form of a key:value pair.
" } }, "com.amazonaws.nimble#ScriptParameterValue": { @@ -5665,45 +5395,11 @@ } }, "traits": { - "smithy.api#documentation": "Your current quota does not allow you to perform the request action. You can request\n increases for some quotas, and other quotas cannot be increased.
\nPlease use Amazon Web Services Service Quotas to request an increase.
", + "smithy.api#documentation": "Your current quota does not allow you to perform the request action. You can request\n increases for some quotas, and other quotas cannot be increased.
\nPlease use AWS Service Quotas to request an increase.
", "smithy.api#error": "client", "smithy.api#httpError": 402 } }, - "com.amazonaws.nimble#SessionBackupMode": { - "type": "enum", - "members": { - "AUTOMATIC": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AUTOMATIC" - } - }, - "DEACTIVATED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DEACTIVATED" - } - } - } - }, - "com.amazonaws.nimble#SessionPersistenceMode": { - "type": "enum", - "members": { - "DEACTIVATED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DEACTIVATED" - } - }, - "ACTIVATED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ACTIVATED" - } - } - } - }, "com.amazonaws.nimble#SharedFileSystemConfiguration": { "type": "structure", "members": { @@ -5782,7 +5478,7 @@ "nimble:GetLaunchProfile", "nimble:GetLaunchProfileMember" ], - "smithy.api#documentation": "Transitions sessions from the STOPPED
state into the READY
\n state. The START_IN_PROGRESS
state is the intermediate state between the\n STOPPED
and READY
states.
Transitions sessions from the STOPPED state into the READY state. The\n START_IN_PROGRESS state is the intermediate state between the STOPPED and READY\n states.
", "smithy.api#http": { "method": "POST", "uri": "/2020-08-01/studios/{studioId}/streaming-sessions/{sessionId}/start", @@ -5797,7 +5493,7 @@ "clientToken": { "target": "com.amazonaws.nimble#ClientToken", "traits": { - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -5805,7 +5501,7 @@ "sessionId": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The streaming session ID for the StartStreamingSessionRequest
.
The streaming session ID for the StartStreamingSessionRequest.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -5817,12 +5513,6 @@ "smithy.api#httpLabel": {}, "smithy.api#required": {} } - }, - "backupId": { - "target": "com.amazonaws.nimble#String", - "traits": { - "smithy.api#documentation": "The ID of the backup.
" - } } }, "traits": { @@ -5877,7 +5567,7 @@ "sso:CreateManagedApplicationInstance", "sso:GetManagedApplicationInstance" ], - "smithy.api#documentation": "Repairs the IAM Identity Center configuration for a given studio.
\nIf the studio has a valid IAM Identity Center configuration currently associated with\n it, this operation will fail with a validation error.
\nIf the studio does not have a valid IAM Identity Center configuration currently\n associated with it, then a new IAM Identity Center application is created for the studio\n and the studio is changed to the READY
state.
After the IAM Identity Center application is repaired, you must use the Amazon Nimble Studio console to add administrators and users to your studio.
", + "smithy.api#documentation": "Repairs the IAM Identity Center configuration for a given studio.
\nIf the studio has a valid IAM Identity Center configuration currently associated with\n it, this operation will fail with a validation error.
\nIf the studio does not have a valid IAM Identity Center configuration currently\n associated with it, then a new IAM Identity Center application is created for the studio\n and the studio is changed to the READY state.
\nAfter the IAM Identity Center application is repaired, you must use the Amazon Nimble Studio console to add administrators and users to your studio.
", "smithy.api#http": { "method": "PUT", "uri": "/2020-08-01/studios/{studioId}/sso-configuration", @@ -5892,7 +5582,7 @@ "clientToken": { "target": "com.amazonaws.nimble#ClientToken", "traits": { - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -5964,7 +5654,7 @@ "aws.iam#requiredActions": [ "nimble:GetLaunchProfile" ], - "smithy.api#documentation": "Transitions sessions from the READY
state into the STOPPED
\n state. The STOP_IN_PROGRESS
state is the intermediate state between the\n READY
and STOPPED
states.
Transitions sessions from the READY state into the STOPPED state. The STOP_IN_PROGRESS\n state is the intermediate state between the READY and STOPPED states.
", "smithy.api#http": { "method": "POST", "uri": "/2020-08-01/studios/{studioId}/streaming-sessions/{sessionId}/stop", @@ -5979,7 +5669,7 @@ "clientToken": { "target": "com.amazonaws.nimble#ClientToken", "traits": { - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -5987,7 +5677,7 @@ "sessionId": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The streaming session ID for the StopStreamingSessionRequest
.
The streaming session ID for the StopStreamingSessionRequest.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -5999,12 +5689,6 @@ "smithy.api#httpLabel": {}, "smithy.api#required": {} } - }, - "volumeRetentionMode": { - "target": "com.amazonaws.nimble#VolumeRetentionMode", - "traits": { - "smithy.api#documentation": "Adds additional instructions to a streaming session stop action to either retain the\n EBS volumes or delete the EBS volumes.
" - } } }, "traits": { @@ -6028,7 +5712,7 @@ "clipboardMode": { "target": "com.amazonaws.nimble#StreamingClipboardMode", "traits": { - "smithy.api#documentation": "Allows or deactivates the use of the system clipboard to copy and paste between the\n streaming session and streaming client.
", + "smithy.api#documentation": "Enable or disable the use of the system clipboard to copy and paste between the\n streaming session and streaming client.
", "smithy.api#required": {} } }, @@ -6042,7 +5726,7 @@ "maxSessionLengthInMinutes": { "target": "com.amazonaws.nimble#StreamConfigurationMaxSessionLengthInMinutes", "traits": { - "smithy.api#default": 690, + "smithy.api#default": 0, "smithy.api#documentation": "The length of time, in minutes, that a streaming session can be active before it is\n stopped or terminated. After this point, Nimble Studio automatically terminates or\n stops the session. The default length of time is 690 minutes, and the maximum length of\n time is 30 days.
" } }, @@ -6057,37 +5741,13 @@ "target": "com.amazonaws.nimble#StreamConfigurationMaxStoppedSessionLengthInMinutes", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "Integer that determines if you can start and stop your sessions and how long a session\n can stay in the STOPPED
state. The default value is 0. The maximum value is\n 5760.
This field is allowed only when sessionPersistenceMode
is\n ACTIVATED
and automaticTerminationMode
is\n ACTIVATED
.
If the value is set to 0, your sessions can’t be STOPPED
. If you then\n call StopStreamingSession
, the session fails. If the time that a session\n stays in the READY
state exceeds the maxSessionLengthInMinutes
\n value, the session will automatically be terminated (instead of\n STOPPED
).
If the value is set to a positive number, the session can be stopped. You can call\n StopStreamingSession
to stop sessions in the READY
state.\n If the time that a session stays in the READY
state exceeds the\n maxSessionLengthInMinutes
value, the session will automatically be\n stopped (instead of terminated).
Integer that determines if you can start and stop your sessions and how long a session\n can stay in the STOPPED state. The default value is 0. The maximum value is 5760.
\nIf the value is missing or set to 0, your sessions can’t be stopped. If you then call\n StopStreamingSession
, the session fails. If the time that a session\n stays in the READY state exceeds the maxSessionLengthInMinutes
value, the\n session will automatically be terminated (instead of stopped).
If the value is set to a positive number, the session can be stopped. You can call\n StopStreamingSession
to stop sessions in the READY state. If the time\n that a session stays in the READY state exceeds the\n maxSessionLengthInMinutes
value, the session will automatically be\n stopped (instead of terminated).
The upload storage for a streaming session.
" - } - }, - "sessionBackup": { - "target": "com.amazonaws.nimble#StreamConfigurationSessionBackup", - "traits": { - "smithy.api#documentation": "Information about the streaming session backup.
" - } - }, - "sessionPersistenceMode": { - "target": "com.amazonaws.nimble#SessionPersistenceMode", - "traits": { - "smithy.api#documentation": "Determine if a streaming session created from this launch profile can configure\n persistent storage. This means that volumeConfiguration
and\n automaticTerminationMode
are configured.
Custom volume configuration for the root volumes that are attached to streaming\n sessions.
\nThis parameter is only allowed when sessionPersistenceMode
is\n ACTIVATED
.
Indicates if a streaming session created from this launch profile should be terminated\n automatically or retained without termination after being in a STOPPED
\n state.
When ACTIVATED
, the streaming session is scheduled for\n termination after being in the STOPPED
state for the time specified\n in maxStoppedSessionLengthInMinutes
.
When DEACTIVATED
, the streaming session can remain in the\n STOPPED
state indefinitely.
This parameter is only allowed when sessionPersistenceMode
is\n ACTIVATED
. When allowed, the default value for this parameter is\n DEACTIVATED
.
(Optional) The upload storage for a streaming session.
" } } }, @@ -6101,7 +5761,7 @@ "clipboardMode": { "target": "com.amazonaws.nimble#StreamingClipboardMode", "traits": { - "smithy.api#documentation": "Allows or deactivates the use of the system clipboard to copy and paste between the\n streaming session and streaming client.
", + "smithy.api#documentation": "Enable or disable the use of the system clipboard to copy and paste between the\n streaming session and streaming client.
", "smithy.api#required": {} } }, @@ -6115,7 +5775,7 @@ "maxSessionLengthInMinutes": { "target": "com.amazonaws.nimble#StreamConfigurationMaxSessionLengthInMinutes", "traits": { - "smithy.api#default": 690, + "smithy.api#default": 0, "smithy.api#documentation": "The length of time, in minutes, that a streaming session can be active before it is\n stopped or terminated. After this point, Nimble Studio automatically terminates or\n stops the session. The default length of time is 690 minutes, and the maximum length of\n time is 30 days.
" } }, @@ -6130,37 +5790,13 @@ "target": "com.amazonaws.nimble#StreamConfigurationMaxStoppedSessionLengthInMinutes", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "Integer that determines if you can start and stop your sessions and how long a session\n can stay in the STOPPED
state. The default value is 0. The maximum value is\n 5760.
This field is allowed only when sessionPersistenceMode
is\n ACTIVATED
and automaticTerminationMode
is\n ACTIVATED
.
If the value is set to 0, your sessions can’t be STOPPED
. If you then\n call StopStreamingSession
, the session fails. If the time that a session\n stays in the READY
state exceeds the maxSessionLengthInMinutes
\n value, the session will automatically be terminated (instead of\n STOPPED
).
If the value is set to a positive number, the session can be stopped. You can call\n StopStreamingSession
to stop sessions in the READY
state.\n If the time that a session stays in the READY
state exceeds the\n maxSessionLengthInMinutes
value, the session will automatically be\n stopped (instead of terminated).
Integer that determines if you can start and stop your sessions and how long a session\n can stay in the STOPPED state. The default value is 0. The maximum value is 5760.
\nIf the value is missing or set to 0, your sessions can’t be stopped. If you then call\n StopStreamingSession
, the session fails. If the time that a session\n stays in the READY state exceeds the maxSessionLengthInMinutes
value, the\n session will automatically be terminated (instead of stopped).
If the value is set to a positive number, the session can be stopped. You can call\n StopStreamingSession
to stop sessions in the READY state. If the time\n that a session stays in the READY state exceeds the\n maxSessionLengthInMinutes
value, the session will automatically be\n stopped (instead of terminated).
The upload storage for a streaming workstation that is created using this launch\n profile.
" - } - }, - "sessionBackup": { - "target": "com.amazonaws.nimble#StreamConfigurationSessionBackup", - "traits": { - "smithy.api#documentation": "Configures how streaming sessions are backed up when launched from this launch\n profile.
" - } - }, - "sessionPersistenceMode": { - "target": "com.amazonaws.nimble#SessionPersistenceMode", - "traits": { - "smithy.api#documentation": "Determine if a streaming session created from this launch profile can configure\n persistent storage. This means that volumeConfiguration
and\n automaticTerminationMode
are configured.
Custom volume configuration for the root volumes that are attached to streaming\n sessions.
\nThis parameter is only allowed when sessionPersistenceMode
is\n ACTIVATED
.
Indicates if a streaming session created from this launch profile should be terminated\n automatically or retained without termination after being in a STOPPED
\n state.
When ACTIVATED
, the streaming session is scheduled for\n termination after being in the STOPPED
state for the time specified\n in maxStoppedSessionLengthInMinutes
.
When DEACTIVATED
, the streaming session can remain in the\n STOPPED
state indefinitely.
This parameter is only allowed when sessionPersistenceMode
is\n ACTIVATED
. When allowed, the default value for this parameter is\n DEACTIVATED
.
(Optional) The upload storage for a streaming workstation that is created using this\n launch profile.
" } } }, @@ -6168,20 +5804,10 @@ "smithy.api#documentation": "Configuration for streaming workstations created using this launch profile.
" } }, - "com.amazonaws.nimble#StreamConfigurationMaxBackupsToRetain": { - "type": "integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#range": { - "min": 0, - "max": 10 - } - } - }, "com.amazonaws.nimble#StreamConfigurationMaxSessionLengthInMinutes": { "type": "integer", "traits": { - "smithy.api#default": 690, + "smithy.api#default": 0, "smithy.api#range": { "min": 1, "max": 43200 @@ -6198,27 +5824,6 @@ } } }, - "com.amazonaws.nimble#StreamConfigurationSessionBackup": { - "type": "structure", - "members": { - "mode": { - "target": "com.amazonaws.nimble#SessionBackupMode", - "traits": { - "smithy.api#documentation": "Specifies how artists sessions are backed up.
\nConfigures backups for streaming sessions launched with this launch profile. The\n default value is DEACTIVATED
, which means that backups are deactivated. To\n allow backups, set this value to AUTOMATIC
.
The maximum number of backups that each streaming session created from this launch\n profile can have.
" - } - } - }, - "traits": { - "smithy.api#documentation": "Configures how streaming sessions are backed up when launched from this launch\n profile.
" - } - }, "com.amazonaws.nimble#StreamConfigurationSessionStorage": { "type": "structure", "members": { @@ -6241,20 +5846,18 @@ } }, "com.amazonaws.nimble#StreamingClipboardMode": { - "type": "enum", - "members": { - "ENABLED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ENABLED" - } - }, - "DISABLED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DISABLED" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ENABLED", + "name": "ENABLED" + }, + { + "value": "DISABLED", + "name": "DISABLED" } - } + ] } }, "com.amazonaws.nimble#StreamingImage": { @@ -6264,7 +5867,7 @@ "target": "com.amazonaws.nimble#String", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "The Amazon Resource Name (ARN) that is assigned to a studio resource and uniquely\n identifies it. ARNs are unique across all Regions.
" + "smithy.api#documentation": "The ARN of the resource.
" } }, "description": { @@ -6306,14 +5909,14 @@ "target": "com.amazonaws.nimble#StreamingImageOwner", "traits": { "aws.cloudformation#cfnMutability": "read", - "smithy.api#documentation": "The owner of the streaming image, either the studioId
that contains the\n streaming image, or amazon
for images that are provided by Amazon Nimble Studio.
The owner of the streaming image, either the studioId that contains the streaming\n image, or 'amazon' for images that are provided by Amazon Nimble Studio.
" } }, "platform": { "target": "com.amazonaws.nimble#StreamingImagePlatform", "traits": { "aws.cloudformation#cfnMutability": "read", - "smithy.api#documentation": "The platform of the streaming image, either Windows or Linux.
" + "smithy.api#documentation": "The platform of the streaming image, either WINDOWS or LINUX.
" } }, "state": { @@ -6347,12 +5950,12 @@ "tags": { "target": "com.amazonaws.nimble#Tags", "traits": { - "smithy.api#documentation": "A collection of labels, in the form of key-value pairs, that apply to this\n resource.
" + "smithy.api#documentation": "A collection of labels, in the form of key:value pairs, that apply to this\n resource.
" } } }, "traits": { - "smithy.api#documentation": "Represents a streaming image resource.
\nStreaming images are used by studio users to select which operating system and\n software they want to use in a Nimble Studio streaming session.
\nAmazon provides a number of streaming images that include popular 3rd-party\n software.
\nYou can create your own streaming images using an Amazon EC2 machine image\n that you create for this purpose. You can also include software that your users\n require.
" + "smithy.api#documentation": "Represents a streaming image resource.
\nStreaming images are used by studio users to select which operating system and\n software they want to use in a Nimble Studio streaming session.
\nAmazon provides a number of streaming images that include popular 3rd-party\n software.
\nYou can create your own streaming images using an Amazon Elastic Compute Cloud (Amazon\n EC2) machine image that you create for this purpose. You can also include software that\n your users require.
" } }, "com.amazonaws.nimble#StreamingImageDescription": { @@ -6397,14 +6000,14 @@ } }, "com.amazonaws.nimble#StreamingImageEncryptionConfigurationKeyType": { - "type": "enum", - "members": { - "CUSTOMER_MANAGED_KEY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CUSTOMER_MANAGED_KEY" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CUSTOMER_MANAGED_KEY", + "name": "CUSTOMER_MANAGED_KEY" } - } + ] } }, "com.amazonaws.nimble#StreamingImageId": { @@ -6450,13 +6053,13 @@ "com.amazonaws.nimble#StreamingImageOwner": { "type": "string", "traits": { - "smithy.api#documentation": "StreamingImageOwner is the owner of a particular streaming image.
\nThis string is either the studioId that contains the streaming image, or the word\n AMAZON
for images provided by Nimble Studio.
StreamingImageOwner is the owner of a particular streaming image.
\nThis string is either the studioId that contains the streaming image, or the word\n 'AMAZON' for images provided by Nimble Studio.
" } }, "com.amazonaws.nimble#StreamingImagePlatform": { "type": "string", "traits": { - "smithy.api#documentation": "The platform of this streaming image, either Windows or Linux.
", + "smithy.api#documentation": "The platform of this Streaming Image, either WINDOWS or LINUX.
", "smithy.api#pattern": "^[a-zA-Z]*$" } }, @@ -6509,189 +6112,140 @@ } }, "com.amazonaws.nimble#StreamingImageState": { - "type": "enum", - "members": { - "CREATE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CREATE_IN_PROGRESS" - } - }, - "READY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "READY" - } - }, - "DELETE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DELETE_IN_PROGRESS" - } - }, - "DELETED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DELETED" - } - }, - "UPDATE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UPDATE_IN_PROGRESS" - } - }, - "UPDATE_FAILED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UPDATE_FAILED" - } - }, - "CREATE_FAILED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CREATE_FAILED" - } - }, - "DELETE_FAILED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DELETE_FAILED" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CREATE_IN_PROGRESS", + "name": "CREATE_IN_PROGRESS" + }, + { + "value": "READY", + "name": "READY" + }, + { + "value": "DELETE_IN_PROGRESS", + "name": "DELETE_IN_PROGRESS" + }, + { + "value": "DELETED", + "name": "DELETED" + }, + { + "value": "UPDATE_IN_PROGRESS", + "name": "UPDATE_IN_PROGRESS" + }, + { + "value": "UPDATE_FAILED", + "name": "UPDATE_FAILED" + }, + { + "value": "CREATE_FAILED", + "name": "CREATE_FAILED" + }, + { + "value": "DELETE_FAILED", + "name": "DELETE_FAILED" } - } + ] } }, "com.amazonaws.nimble#StreamingImageStatusCode": { - "type": "enum", - "members": { - "STREAMING_IMAGE_CREATE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STREAMING_IMAGE_CREATE_IN_PROGRESS" - } - }, - "STREAMING_IMAGE_READY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STREAMING_IMAGE_READY" - } - }, - "STREAMING_IMAGE_DELETE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STREAMING_IMAGE_DELETE_IN_PROGRESS" - } - }, - "STREAMING_IMAGE_DELETED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STREAMING_IMAGE_DELETED" - } - }, - "STREAMING_IMAGE_UPDATE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STREAMING_IMAGE_UPDATE_IN_PROGRESS" - } - }, - "INTERNAL_ERROR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "INTERNAL_ERROR" - } - }, - "ACCESS_DENIED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ACCESS_DENIED" - } - } - }, + "type": "string", "traits": { - "smithy.api#documentation": "The status code.
" + "smithy.api#documentation": "The status code.
", + "smithy.api#enum": [ + { + "value": "STREAMING_IMAGE_CREATE_IN_PROGRESS", + "name": "STREAMING_IMAGE_CREATE_IN_PROGRESS" + }, + { + "value": "STREAMING_IMAGE_READY", + "name": "STREAMING_IMAGE_READY" + }, + { + "value": "STREAMING_IMAGE_DELETE_IN_PROGRESS", + "name": "STREAMING_IMAGE_DELETE_IN_PROGRESS" + }, + { + "value": "STREAMING_IMAGE_DELETED", + "name": "STREAMING_IMAGE_DELETED" + }, + { + "value": "STREAMING_IMAGE_UPDATE_IN_PROGRESS", + "name": "STREAMING_IMAGE_UPDATE_IN_PROGRESS" + }, + { + "value": "INTERNAL_ERROR", + "name": "INTERNAL_ERROR" + }, + { + "value": "ACCESS_DENIED", + "name": "ACCESS_DENIED" + } + ] } }, "com.amazonaws.nimble#StreamingInstanceType": { - "type": "enum", - "members": { - "g4dn_xlarge": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "g4dn.xlarge" - } - }, - "g4dn_2xlarge": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "g4dn.2xlarge" - } - }, - "g4dn_4xlarge": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "g4dn.4xlarge" - } - }, - "g4dn_8xlarge": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "g4dn.8xlarge" - } - }, - "g4dn_12xlarge": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "g4dn.12xlarge" - } - }, - "g4dn_16xlarge": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "g4dn.16xlarge" - } - }, - "g3_4xlarge": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "g3.4xlarge" - } - }, - "g3s_xlarge": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "g3s.xlarge" - } - }, - "g5_xlarge": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "g5.xlarge" - } - }, - "g5_2xlarge": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "g5.2xlarge" - } - }, - "g5_4xlarge": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "g5.4xlarge" - } - }, - "g5_8xlarge": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "g5.8xlarge" - } - }, - "g5_16xlarge": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "g5.16xlarge" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "g4dn.xlarge", + "name": "g4dn_xlarge" + }, + { + "value": "g4dn.2xlarge", + "name": "g4dn_2xlarge" + }, + { + "value": "g4dn.4xlarge", + "name": "g4dn_4xlarge" + }, + { + "value": "g4dn.8xlarge", + "name": "g4dn_8xlarge" + }, + { + "value": "g4dn.12xlarge", + "name": "g4dn_12xlarge" + }, + { + "value": "g4dn.16xlarge", + "name": "g4dn_16xlarge" + }, + { + "value": "g3.4xlarge", + "name": "g3_4xlarge" + }, + { + "value": "g3s.xlarge", + "name": "g3s_xlarge" + }, + { + "value": "g5.xlarge", + "name": "g5_xlarge" + }, + { + "value": "g5.2xlarge", + "name": "g5_2xlarge" + }, + { + "value": "g5.4xlarge", + "name": "g5_4xlarge" + }, + { + "value": "g5.8xlarge", + "name": "g5_8xlarge" + }, + { + "value": "g5.16xlarge", + "name": "g5_16xlarge" } - } + ], + "smithy.api#suppress": [ + "EnumTrait" + ] } }, "com.amazonaws.nimble#StreamingInstanceTypeList": { @@ -6713,13 +6267,13 @@ "arn": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) that is assigned to a studio resource and uniquely\n identifies it. ARNs are unique across all Regions.
" + "smithy.api#documentation": "The ARN of the resource.
" } }, "createdAt": { "target": "com.amazonaws.nimble#Timestamp", "traits": { - "smithy.api#documentation": "The ISO timestamp in seconds for when the resource was created.
" + "smithy.api#documentation": "The Unix epoch timestamp in seconds for when the resource was created.
" } }, "createdBy": { @@ -6779,7 +6333,7 @@ "tags": { "target": "com.amazonaws.nimble#Tags", "traits": { - "smithy.api#documentation": "A collection of labels, in the form of key-value pairs, that apply to this\n resource.
" + "smithy.api#documentation": "A collection of labels, in the form of key:value pairs, that apply to this\n resource.
" } }, "terminateAt": { @@ -6791,7 +6345,7 @@ "updatedAt": { "target": "com.amazonaws.nimble#Timestamp", "traits": { - "smithy.api#documentation": "The ISO timestamp in seconds for when the resource was updated.
" + "smithy.api#documentation": "The Unix epoch timestamp in seconds for when the resource was updated.
" } }, "updatedBy": { @@ -6803,7 +6357,7 @@ "stoppedAt": { "target": "com.amazonaws.nimble#Timestamp", "traits": { - "smithy.api#documentation": "The time the session entered STOP_IN_PROGRESS
state.
The time the session entered STOP_IN_PROGRESS state.
" } }, "stoppedBy": { @@ -6815,7 +6369,7 @@ "startedAt": { "target": "com.amazonaws.nimble#Timestamp", "traits": { - "smithy.api#documentation": "The time the session entered START_IN_PROGRESS
state.
The time the session entered START_IN_PROGRESS state.
" } }, "startedBy": { @@ -6829,151 +6383,12 @@ "traits": { "smithy.api#documentation": "The time the streaming session will automatically be stopped if the user doesn’t stop\n the session themselves.
" } - }, - "startedFromBackupId": { - "target": "com.amazonaws.nimble#String", - "traits": { - "smithy.api#documentation": "The backup ID used to restore a streaming session.
" - } - }, - "backupMode": { - "target": "com.amazonaws.nimble#SessionBackupMode", - "traits": { - "smithy.api#documentation": "Shows the current backup setting of the session.
" - } - }, - "maxBackupsToRetain": { - "target": "com.amazonaws.nimble#StreamConfigurationMaxBackupsToRetain", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "The maximum number of backups of a streaming session that you can have. When the\n maximum number of backups is reached, the oldest backup is deleted.
" - } - }, - "volumeRetentionMode": { - "target": "com.amazonaws.nimble#VolumeRetentionMode", - "traits": { - "smithy.api#documentation": "Determine if an EBS volume created from this streaming session will be backed\n up.
" - } - }, - "sessionPersistenceMode": { - "target": "com.amazonaws.nimble#SessionPersistenceMode", - "traits": { - "smithy.api#documentation": "Determine if a streaming session created from this launch profile can configure\n persistent storage. This means that volumeConfiguration
and\n automaticTerminationMode
are configured.
Custom volume configuration for the root volumes that are attached to streaming\n sessions.
\nThis parameter is only allowed when sessionPersistenceMode
is\n ACTIVATED
.
Indicates if a streaming session created from this launch profile should be terminated\n automatically or retained without termination after being in a STOPPED
\n state.
When ACTIVATED
, the streaming session is scheduled for\n termination after being in the STOPPED
state for the time specified\n in maxStoppedSessionLengthInMinutes
.
When DEACTIVATED
, the streaming session can remain in the\n STOPPED
state indefinitely.
This parameter is only allowed when sessionPersistenceMode
is\n ACTIVATED
. When allowed, the default value for this parameter is\n DEACTIVATED
.
A streaming session is a virtual workstation created using a particular launch\n profile.
" } }, - "com.amazonaws.nimble#StreamingSessionBackup": { - "type": "structure", - "members": { - "arn": { - "target": "com.amazonaws.nimble#String", - "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) that is assigned to a studio resource and uniquely\n identifies it. ARNs are unique across all Regions.
" - } - }, - "createdAt": { - "target": "com.amazonaws.nimble#Timestamp", - "traits": { - "smithy.api#documentation": "The ISO timestamp in for when the resource was created.
" - } - }, - "launchProfileId": { - "target": "com.amazonaws.nimble#String", - "traits": { - "smithy.api#documentation": "The ID of the launch profile which allowed the backups for the streaming\n session.
" - } - }, - "ownedBy": { - "target": "com.amazonaws.nimble#String", - "traits": { - "smithy.api#documentation": "The user ID of the user that owns the streaming session.
" - } - }, - "sessionId": { - "target": "com.amazonaws.nimble#StreamingSessionId", - "traits": { - "smithy.api#documentation": "The streaming session ID for the StreamingSessionBackup
.
The status code.
" - } - }, - "statusMessage": { - "target": "com.amazonaws.nimble#String", - "traits": { - "smithy.api#documentation": "The status message for the streaming session backup.
" - } - }, - "backupId": { - "target": "com.amazonaws.nimble#String", - "traits": { - "smithy.api#documentation": "The ID of the backup.
" - } - }, - "tags": { - "target": "com.amazonaws.nimble#Tags", - "traits": { - "smithy.api#documentation": "A collection of labels, in the form of key-value pairs, that apply to this\n resource.
" - } - } - }, - "traits": { - "smithy.api#documentation": "Information about the streaming session backup.
" - } - }, - "com.amazonaws.nimble#StreamingSessionBackupList": { - "type": "list", - "member": { - "target": "com.amazonaws.nimble#StreamingSessionBackup" - } - }, - "com.amazonaws.nimble#StreamingSessionBackupResource": { - "type": "resource", - "identifiers": { - "backupId": { - "target": "com.amazonaws.nimble#String" - } - }, - "read": { - "target": "com.amazonaws.nimble#GetStreamingSessionBackup" - }, - "list": { - "target": "com.amazonaws.nimble#ListStreamingSessionBackups" - }, - "traits": { - "aws.api#arn": { - "template": "streaming-session-backup/{backupId}" - }, - "aws.iam#conditionKeys": [ - "aws:ResourceTag/${TagKey}", - "nimble:ownedBy" - ], - "aws.iam#disableConditionKeyInference": {}, - "smithy.api#documentation": "Represents a streaming session backup originated from a studio user launched streaming session" - } - }, "com.amazonaws.nimble#StreamingSessionId": { "type": "string" }, @@ -7034,189 +6449,137 @@ } }, "com.amazonaws.nimble#StreamingSessionState": { - "type": "enum", - "members": { - "CREATE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CREATE_IN_PROGRESS" - } - }, - "DELETE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DELETE_IN_PROGRESS" - } - }, - "READY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "READY" - } - }, - "DELETED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DELETED" - } - }, - "CREATE_FAILED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CREATE_FAILED" - } - }, - "DELETE_FAILED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DELETE_FAILED" - } - }, - "STOP_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STOP_IN_PROGRESS" - } - }, - "START_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "START_IN_PROGRESS" - } - }, - "STOPPED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STOPPED" - } - }, - "STOP_FAILED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STOP_FAILED" - } - }, - "START_FAILED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "START_FAILED" - } - } - }, + "type": "string", "traits": { - "smithy.api#documentation": "The streaming session state.
" + "smithy.api#documentation": "The streaming session state.
", + "smithy.api#enum": [ + { + "value": "CREATE_IN_PROGRESS", + "name": "CREATE_IN_PROGRESS" + }, + { + "value": "DELETE_IN_PROGRESS", + "name": "DELETE_IN_PROGRESS" + }, + { + "value": "READY", + "name": "READY" + }, + { + "value": "DELETED", + "name": "DELETED" + }, + { + "value": "CREATE_FAILED", + "name": "CREATE_FAILED" + }, + { + "value": "DELETE_FAILED", + "name": "DELETE_FAILED" + }, + { + "value": "STOP_IN_PROGRESS", + "name": "STOP_IN_PROGRESS" + }, + { + "value": "START_IN_PROGRESS", + "name": "START_IN_PROGRESS" + }, + { + "value": "STOPPED", + "name": "STOPPED" + }, + { + "value": "STOP_FAILED", + "name": "STOP_FAILED" + }, + { + "value": "START_FAILED", + "name": "START_FAILED" + } + ] } }, "com.amazonaws.nimble#StreamingSessionStatusCode": { - "type": "enum", - "members": { - "STREAMING_SESSION_READY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STREAMING_SESSION_READY" - } - }, - "STREAMING_SESSION_DELETED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STREAMING_SESSION_DELETED" - } - }, - "STREAMING_SESSION_CREATE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STREAMING_SESSION_CREATE_IN_PROGRESS" - } - }, - "STREAMING_SESSION_DELETE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STREAMING_SESSION_DELETE_IN_PROGRESS" - } - }, - "INTERNAL_ERROR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "INTERNAL_ERROR" - } - }, - "INSUFFICIENT_CAPACITY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "INSUFFICIENT_CAPACITY" - } - }, - "ACTIVE_DIRECTORY_DOMAIN_JOIN_ERROR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ACTIVE_DIRECTORY_DOMAIN_JOIN_ERROR" - } - }, - "NETWORK_CONNECTION_ERROR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NETWORK_CONNECTION_ERROR" - } - }, - "INITIALIZATION_SCRIPT_ERROR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "INITIALIZATION_SCRIPT_ERROR" - } - }, - "DECRYPT_STREAMING_IMAGE_ERROR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DECRYPT_STREAMING_IMAGE_ERROR" - } - }, - "NETWORK_INTERFACE_ERROR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NETWORK_INTERFACE_ERROR" - } - }, - "STREAMING_SESSION_STOPPED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STREAMING_SESSION_STOPPED" - } - }, - "STREAMING_SESSION_STARTED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STREAMING_SESSION_STARTED" - } - }, - "STREAMING_SESSION_STOP_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STREAMING_SESSION_STOP_IN_PROGRESS" - } - }, - "STREAMING_SESSION_START_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STREAMING_SESSION_START_IN_PROGRESS" - } - }, - "AMI_VALIDATION_ERROR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AMI_VALIDATION_ERROR" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "STREAMING_SESSION_READY", + "name": "STREAMING_SESSION_READY" + }, + { + "value": "STREAMING_SESSION_DELETED", + "name": "STREAMING_SESSION_DELETED" + }, + { + "value": "STREAMING_SESSION_CREATE_IN_PROGRESS", + "name": "STREAMING_SESSION_CREATE_IN_PROGRESS" + }, + { + "value": "STREAMING_SESSION_DELETE_IN_PROGRESS", + "name": "STREAMING_SESSION_DELETE_IN_PROGRESS" + }, 
+ { + "value": "INTERNAL_ERROR", + "name": "INTERNAL_ERROR" + }, + { + "value": "INSUFFICIENT_CAPACITY", + "name": "INSUFFICIENT_CAPACITY" + }, + { + "value": "ACTIVE_DIRECTORY_DOMAIN_JOIN_ERROR", + "name": "ACTIVE_DIRECTORY_DOMAIN_JOIN_ERROR" + }, + { + "value": "NETWORK_CONNECTION_ERROR", + "name": "NETWORK_CONNECTION_ERROR" + }, + { + "value": "INITIALIZATION_SCRIPT_ERROR", + "name": "INITIALIZATION_SCRIPT_ERROR" + }, + { + "value": "DECRYPT_STREAMING_IMAGE_ERROR", + "name": "DECRYPT_STREAMING_IMAGE_ERROR" + }, + { + "value": "NETWORK_INTERFACE_ERROR", + "name": "NETWORK_INTERFACE_ERROR" + }, + { + "value": "STREAMING_SESSION_STOPPED", + "name": "STREAMING_SESSION_STOPPED" + }, + { + "value": "STREAMING_SESSION_STARTED", + "name": "STREAMING_SESSION_STARTED" + }, + { + "value": "STREAMING_SESSION_STOP_IN_PROGRESS", + "name": "STREAMING_SESSION_STOP_IN_PROGRESS" + }, + { + "value": "STREAMING_SESSION_START_IN_PROGRESS", + "name": "STREAMING_SESSION_START_IN_PROGRESS" + }, + { + "value": "AMI_VALIDATION_ERROR", + "name": "AMI_VALIDATION_ERROR" } - } + ] } }, "com.amazonaws.nimble#StreamingSessionStorageMode": { - "type": "enum", - "members": { - "UPLOAD": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UPLOAD" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "UPLOAD", + "name": "UPLOAD" } - } + ] } }, "com.amazonaws.nimble#StreamingSessionStorageModeList": { @@ -7278,7 +6641,7 @@ "createdAt": { "target": "com.amazonaws.nimble#Timestamp", "traits": { - "smithy.api#documentation": "The ISO timestamp in seconds for when the resource was created.
" + "smithy.api#documentation": "The Unix epoch timestamp in seconds for when the resource was created.
" } }, "createdBy": { @@ -7290,7 +6653,7 @@ "expiresAt": { "target": "com.amazonaws.nimble#Timestamp", "traits": { - "smithy.api#documentation": "The ISO timestamp in seconds for when the resource expires.
" + "smithy.api#documentation": "The Unix epoch timestamp in seconds for when the resource expires.
" } }, "ownedBy": { @@ -7331,7 +6694,7 @@ "com.amazonaws.nimble#StreamingSessionStreamExpirationInSeconds": { "type": "integer", "traits": { - "smithy.api#default": 120, + "smithy.api#default": 0, "smithy.api#range": { "min": 60, "max": 3600 @@ -7339,85 +6702,65 @@ } }, "com.amazonaws.nimble#StreamingSessionStreamState": { - "type": "enum", - "members": { - "READY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "READY" - } - }, - "CREATE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CREATE_IN_PROGRESS" - } - }, - "DELETE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DELETE_IN_PROGRESS" - } - }, - "DELETED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DELETED" - } - }, - "CREATE_FAILED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CREATE_FAILED" - } - }, - "DELETE_FAILED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DELETE_FAILED" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "READY", + "name": "READY" + }, + { + "value": "CREATE_IN_PROGRESS", + "name": "CREATE_IN_PROGRESS" + }, + { + "value": "DELETE_IN_PROGRESS", + "name": "DELETE_IN_PROGRESS" + }, + { + "value": "DELETED", + "name": "DELETED" + }, + { + "value": "CREATE_FAILED", + "name": "CREATE_FAILED" + }, + { + "value": "DELETE_FAILED", + "name": "DELETE_FAILED" } - } + ] } }, "com.amazonaws.nimble#StreamingSessionStreamStatusCode": { - "type": "enum", - "members": { - "STREAM_CREATE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STREAM_CREATE_IN_PROGRESS" - } - }, - "STREAM_READY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STREAM_READY" - } - }, - "STREAM_DELETE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STREAM_DELETE_IN_PROGRESS" - } - }, - "STREAM_DELETED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STREAM_DELETED" - } - }, - "INTERNAL_ERROR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "INTERNAL_ERROR" - } - }, - "NETWORK_CONNECTION_ERROR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NETWORK_CONNECTION_ERROR" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "STREAM_CREATE_IN_PROGRESS", + "name": "STREAM_CREATE_IN_PROGRESS" + }, + { + "value": "STREAM_READY", + "name": "STREAM_READY" + }, + { + "value": "STREAM_DELETE_IN_PROGRESS", + "name": "STREAM_DELETE_IN_PROGRESS" + }, + { + "value": "STREAM_DELETED", + "name": "STREAM_DELETED" + }, + { + "value": "INTERNAL_ERROR", + "name": "INTERNAL_ERROR" + }, + { + "value": "NETWORK_CONNECTION_ERROR", + "name": "NETWORK_CONNECTION_ERROR" } - } + ] } }, "com.amazonaws.nimble#String": { @@ -7436,7 +6779,7 @@ "target": "com.amazonaws.nimble#RoleArn", "traits": { "aws.cloudformation#cfnMutability": "full", - "smithy.api#documentation": "The IAM role that studio admins assume when logging in to the Nimble Studio portal.
" + "smithy.api#documentation": "The IAM role that studio admins assume when logging in to the Nimble Studio\n portal.
" } }, "arn": { @@ -7450,7 +6793,7 @@ "target": "com.amazonaws.nimble#Timestamp", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "The ISO timestamp in seconds for when the resource was created.
" + "smithy.api#documentation": "The Unix epoch timestamp in seconds for when the resource was created.
" } }, "displayName": { @@ -7470,7 +6813,7 @@ "target": "com.amazonaws.nimble#String", "traits": { "aws.cloudformation#cfnMutability": "read", - "smithy.api#documentation": "The IAM Identity Center application client ID used to integrate with IAM Identity Center. This ID allows IAM Identity Center users to log in to Nimble Studio\n portal.
" + "smithy.api#documentation": "The IAM Identity Center application client ID used to integrate with IAM Identity Center\n to enable IAM Identity Center users to log in to Nimble Studio portal.
" } }, "state": { @@ -7524,26 +6867,26 @@ "target": "com.amazonaws.nimble#Tags", "traits": { "aws.cloudformation#cfnMutability": "create-and-read", - "smithy.api#documentation": "A collection of labels, in the form of key-value pairs, that apply to this\n resource.
" + "smithy.api#documentation": "A collection of labels, in the form of key:value pairs, that apply to this\n resource.
" } }, "updatedAt": { "target": "com.amazonaws.nimble#Timestamp", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "The ISO timestamp in seconds for when the resource was updated.
" + "smithy.api#documentation": "The Unix epoch timestamp in seconds for when the resource was updated.
" } }, "userRoleArn": { "target": "com.amazonaws.nimble#RoleArn", "traits": { "aws.cloudformation#cfnMutability": "full", - "smithy.api#documentation": "The IAM role that studio users assume when logging in to the Nimble Studio portal.
" + "smithy.api#documentation": "The IAM role that studio users assume when logging in to the Nimble Studio\n portal.
" } } }, "traits": { - "smithy.api#documentation": "Represents a studio resource.
\nA studio is the core resource used with Nimble Studio. You must create a studio\n first, before any other resource type can be created. All other resources you create and\n manage in Nimble Studio are contained within a studio.
\nWhen creating a studio, you must provides two IAM roles for use with\n the Nimble Studio portal. These roles are assumed by your users when they log in to\n the Nimble Studio portal via IAM Identity Center and your identity source.
\nThe user role must have the AmazonNimbleStudio-StudioUser
managed policy\n attached for the portal to function properly.
The admin role must have the AmazonNimbleStudio-StudioAdmin
managed\n policy attached for the portal to function properly.
Your studio roles must trust the identity.nimble.amazonaws.com
service\n principal to function properly.
Represents a studio resource.
\nA studio is the core resource used with Nimble Studio. You must create a studio\n first, before any other resource type can be created. All other resources you create and\n manage in Nimble Studio are contained within a studio.
\nWhen creating a studio, you must provide two IAM roles for use with the Nimble Studio portal. These roles are assumed by your users when they log in to the\n            Nimble Studio portal via IAM Identity Center and your identity source.
\nThe user role must have the AmazonNimbleStudio-StudioUser managed policy attached for\n the portal to function properly.
\nThe admin role must have the AmazonNimbleStudio-StudioAdmin managed policy attached\n for the portal to function properly.
\nYour studio roles must trust the identity.nimble.amazonaws.com service principal to\n function properly.
" } }, "com.amazonaws.nimble#StudioComponent": { @@ -7553,7 +6896,7 @@ "target": "com.amazonaws.nimble#String", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "The Amazon Resource Name (ARN) that is assigned to a studio resource and uniquely\n identifies it. ARNs are unique across all Regions.
" + "smithy.api#documentation": "The ARN of the resource.
" } }, "configuration": { @@ -7567,7 +6910,7 @@ "target": "com.amazonaws.nimble#Timestamp", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "The ISO timestamp in seconds for when the resource was created.
" + "smithy.api#documentation": "The Unix epoch timestamp in seconds for when the resource was created.
" } }, "createdBy": { @@ -7649,7 +6992,7 @@ "tags": { "target": "com.amazonaws.nimble#Tags", "traits": { - "smithy.api#documentation": "A collection of labels, in the form of key-value pairs, that apply to this\n resource.
" + "smithy.api#documentation": "A collection of labels, in the form of key:value pairs, that apply to this\n resource.
" } }, "type": { @@ -7663,7 +7006,7 @@ "target": "com.amazonaws.nimble#Timestamp", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "The ISO timestamp in seconds for when the resource was updated.
" + "smithy.api#documentation": "The Unix epoch timestamp in seconds for when the resource was updated.
" } }, "updatedBy": { @@ -7677,14 +7020,14 @@ "target": "com.amazonaws.nimble#RoleArn", "traits": { "aws.cloudformation#cfnMutability": "full", - "smithy.api#documentation": "An IAM role attached to Studio Component when the system initialization\n script runs which give the studio component access to Amazon Web Services resources when\n the system initialization script runs.
" + "smithy.api#documentation": "An IAM role attached to Studio Component when the system initialization script runs which give the studio component access to AWS resources when the system initialization script runs.
" } }, "runtimeRoleArn": { "target": "com.amazonaws.nimble#RoleArn", "traits": { "aws.cloudformation#cfnMutability": "full", - "smithy.api#documentation": "An IAM role attached to a Studio Component that gives the studio\n component access to Amazon Web Services resources at anytime while the instance is\n running.
" + "smithy.api#documentation": "An IAM role attached to a Studio Component that gives the studio component access to AWS resources at anytime while the instance is running.
" } } }, @@ -7698,7 +7041,7 @@ "activeDirectoryConfiguration": { "target": "com.amazonaws.nimble#ActiveDirectoryConfiguration", "traits": { - "smithy.api#documentation": "The configuration for a Directory Service for Microsoft Active Directory studio resource.
" + "smithy.api#documentation": "The configuration for a Microsoft Active Directory (Microsoft AD) studio\n resource.
" } }, "computeFarmConfiguration": { @@ -7756,7 +7099,7 @@ "platform": { "target": "com.amazonaws.nimble#LaunchProfilePlatform", "traits": { - "smithy.api#documentation": "The platform of the initialization script, either Windows or Linux.
" + "smithy.api#documentation": "The platform of the initialization script, either WINDOWS or LINUX.
" } }, "runContext": { @@ -7793,20 +7136,18 @@ } }, "com.amazonaws.nimble#StudioComponentInitializationScriptRunContext": { - "type": "enum", - "members": { - "SYSTEM_INITIALIZATION": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SYSTEM_INITIALIZATION" - } - }, - "USER_INITIALIZATION": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "USER_INITIALIZATION" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "SYSTEM_INITIALIZATION", + "name": "SYSTEM_INITIALIZATION" + }, + { + "value": "USER_INITIALIZATION", + "name": "USER_INITIALIZATION" } - } + ] } }, "com.amazonaws.nimble#StudioComponentList": { @@ -7905,59 +7246,43 @@ } }, "com.amazonaws.nimble#StudioComponentState": { - "type": "enum", - "members": { - "CREATE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CREATE_IN_PROGRESS" - } - }, - "READY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "READY" - } - }, - "UPDATE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UPDATE_IN_PROGRESS" - } - }, - "DELETE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DELETE_IN_PROGRESS" - } - }, - "DELETED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DELETED" - } - }, - "DELETE_FAILED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DELETE_FAILED" - } - }, - "CREATE_FAILED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CREATE_FAILED" - } - }, - "UPDATE_FAILED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UPDATE_FAILED" - } - } - }, + "type": "string", "traits": { - "smithy.api#documentation": "The current state of the studio component resource.
\nWhile a studio component is being created, modified, or deleted, its state will be\n CREATE_IN_PROGRESS
, UPDATE_IN_PROGRESS
, or\n DELETE_IN_PROGRESS
.
These are called transition states.
\nNo modifications may be made to the studio component while it is in a transition\n state.
\nIf creation of the resource fails, the state will change to\n CREATE_FAILED
. The resource StatusCode
and\n StatusMessage
will provide more information of why creation failed. The\n resource in this state will automatically be deleted from your account after a period of\n time.
If updating the resource fails, the state will change to UPDATE_FAILED
.\n The resource StatusCode
and StatusMessage
will provide more\n information of why the update failed. The resource will be returned to the state it was\n in when the update request was invoked.
If deleting the resource fails, the state will change to DELETE_FAILED
.\n The resource StatusCode
and StatusMessage
will provide more\n information of why the update failed. The resource will be returned to the state it was\n in when the update request was invoked. After the resource is deleted successfully, it\n will change to the DELETED
state. The resource will no longer count against\n service quotas and cannot be used or acted upon any futher. It will be removed from your\n account after a period of time.
The current state of the studio component resource.
\nWhile a studio component is being created, modified, or deleted, its state will equal\n \"CREATE_IN_PROGRESS\", \"UPDATE_IN_PROGRESS\", or \"DELETE_IN_PROGRESS\"
\nThese are called 'transition states'.
\nNo modifications may be made to the studio component while it is in a transition\n state.
\nIf creation of the resource fails, the state will change to `CREATE_FAILED`. The\n resource StatusCode and StatusMessage will provide more information of why creation\n failed. The resource in this state will automatically be deleted from your account after\n a period of time.
\nIf updating the resource fails, the state will change to `UPDATE_FAILED`. The resource\n StatusCode and StatusMessage will provide more information of why the update failed. The\n resource will be returned to the state it was in when the update request was\n invoked.
\nIf deleting the resource fails, the state will change to `DELETE_FAILED`. The resource\n StatusCode and StatusMessage will provide more information of why the update failed. The\n resource will be returned to the state it was in when the update request was invoked.\n After the resource is deleted successfully, it will change to the \"DELETED\" state. The\n resource will no longer count against service quotas and cannot be used or acted upon\n any further. It will be removed from your account after a period of time.
", + "smithy.api#enum": [ + { + "value": "CREATE_IN_PROGRESS", + "name": "CREATE_IN_PROGRESS" + }, + { + "value": "READY", + "name": "READY" + }, + { + "value": "UPDATE_IN_PROGRESS", + "name": "UPDATE_IN_PROGRESS" + }, + { + "value": "DELETE_IN_PROGRESS", + "name": "DELETE_IN_PROGRESS" + }, + { + "value": "DELETED", + "name": "DELETED" + }, + { + "value": "DELETE_FAILED", + "name": "DELETE_FAILED" + }, + { + "value": "CREATE_FAILED", + "name": "CREATE_FAILED" + }, + { + "value": "UPDATE_FAILED", + "name": "UPDATE_FAILED" + } + ] } }, "com.amazonaws.nimble#StudioComponentStateList": { @@ -7967,100 +7292,74 @@ } }, "com.amazonaws.nimble#StudioComponentStatusCode": { - "type": "enum", - "members": { - "ACTIVE_DIRECTORY_ALREADY_EXISTS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ACTIVE_DIRECTORY_ALREADY_EXISTS" - } - }, - "STUDIO_COMPONENT_CREATED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STUDIO_COMPONENT_CREATED" - } - }, - "STUDIO_COMPONENT_UPDATED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STUDIO_COMPONENT_UPDATED" - } - }, - "STUDIO_COMPONENT_DELETED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STUDIO_COMPONENT_DELETED" - } - }, - "ENCRYPTION_KEY_ACCESS_DENIED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ENCRYPTION_KEY_ACCESS_DENIED" - } - }, - "ENCRYPTION_KEY_NOT_FOUND": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ENCRYPTION_KEY_NOT_FOUND" - } - }, - "STUDIO_COMPONENT_CREATE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STUDIO_COMPONENT_CREATE_IN_PROGRESS" - } - }, - "STUDIO_COMPONENT_UPDATE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STUDIO_COMPONENT_UPDATE_IN_PROGRESS" - } - }, - "STUDIO_COMPONENT_DELETE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STUDIO_COMPONENT_DELETE_IN_PROGRESS" - } - }, - "INTERNAL_ERROR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "INTERNAL_ERROR" - } - } - }, + "type": "string", "traits": { - "smithy.api#documentation": "The current status of the studio component resource.
\nWhen the resource is in the READY
state, the status code signals what the\n last mutation made to the resource was.
When the resource is in a CREATE_FAILED
, UPDATE_FAILED
, or\n DELETE_FAILED
state, the status code signals what went wrong and why\n the mutation failed.
The current status of the studio component resource.
\nWhen the resource is in the 'READY' state, the status code signals what the last\n mutation made to the resource was.
\nWhen the resource is in a CREATE_FAILED/UPDATE_FAILED/DELETE_FAILED state, the status\n code signals what went wrong and why the mutation failed.
", + "smithy.api#enum": [ + { + "value": "ACTIVE_DIRECTORY_ALREADY_EXISTS", + "name": "ACTIVE_DIRECTORY_ALREADY_EXISTS" + }, + { + "value": "STUDIO_COMPONENT_CREATED", + "name": "STUDIO_COMPONENT_CREATED" + }, + { + "value": "STUDIO_COMPONENT_UPDATED", + "name": "STUDIO_COMPONENT_UPDATED" + }, + { + "value": "STUDIO_COMPONENT_DELETED", + "name": "STUDIO_COMPONENT_DELETED" + }, + { + "value": "ENCRYPTION_KEY_ACCESS_DENIED", + "name": "ENCRYPTION_KEY_ACCESS_DENIED" + }, + { + "value": "ENCRYPTION_KEY_NOT_FOUND", + "name": "ENCRYPTION_KEY_NOT_FOUND" + }, + { + "value": "STUDIO_COMPONENT_CREATE_IN_PROGRESS", + "name": "STUDIO_COMPONENT_CREATE_IN_PROGRESS" + }, + { + "value": "STUDIO_COMPONENT_UPDATE_IN_PROGRESS", + "name": "STUDIO_COMPONENT_UPDATE_IN_PROGRESS" + }, + { + "value": "STUDIO_COMPONENT_DELETE_IN_PROGRESS", + "name": "STUDIO_COMPONENT_DELETE_IN_PROGRESS" + }, + { + "value": "INTERNAL_ERROR", + "name": "INTERNAL_ERROR" + } + ] } }, "com.amazonaws.nimble#StudioComponentSubtype": { - "type": "enum", - "members": { - "AWS_MANAGED_MICROSOFT_AD": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AWS_MANAGED_MICROSOFT_AD" - } - }, - "AMAZON_FSX_FOR_WINDOWS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AMAZON_FSX_FOR_WINDOWS" - } - }, - "AMAZON_FSX_FOR_LUSTRE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AMAZON_FSX_FOR_LUSTRE" - } - }, - "CUSTOM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CUSTOM" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "AWS_MANAGED_MICROSOFT_AD", + "name": "AWS_MANAGED_MICROSOFT_AD" + }, + { + "value": "AMAZON_FSX_FOR_WINDOWS", + "name": "AMAZON_FSX_FOR_WINDOWS" + }, + { + "value": "AMAZON_FSX_FOR_LUSTRE", + "name": "AMAZON_FSX_FOR_LUSTRE" + }, + { + "value": "CUSTOM", + "name": "CUSTOM" } - } + ] } }, "com.amazonaws.nimble#StudioComponentSummary": { @@ -8069,7 +7368,7 @@ "createdAt": { "target": "com.amazonaws.nimble#Timestamp", "traits": { - "smithy.api#documentation": "The ISO timestamp in seconds for when the resource was created.
" + "smithy.api#documentation": "The Unix epoch timestamp in seconds for when the resource was created.
" } }, "createdBy": { @@ -8111,7 +7410,7 @@ "updatedAt": { "target": "com.amazonaws.nimble#Timestamp", "traits": { - "smithy.api#documentation": "The ISO timestamp in seconds for when the resource was updated.
" + "smithy.api#documentation": "The Unix epoch timestamp in seconds for when the resource was updated.
" } }, "updatedBy": { @@ -8132,38 +7431,30 @@ } }, "com.amazonaws.nimble#StudioComponentType": { - "type": "enum", - "members": { - "ACTIVE_DIRECTORY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ACTIVE_DIRECTORY" - } - }, - "SHARED_FILE_SYSTEM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SHARED_FILE_SYSTEM" - } - }, - "COMPUTE_FARM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "COMPUTE_FARM" - } - }, - "LICENSE_SERVICE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "LICENSE_SERVICE" - } - }, - "CUSTOM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CUSTOM" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ACTIVE_DIRECTORY", + "name": "ACTIVE_DIRECTORY" + }, + { + "value": "SHARED_FILE_SYSTEM", + "name": "SHARED_FILE_SYSTEM" + }, + { + "value": "COMPUTE_FARM", + "name": "COMPUTE_FARM" + }, + { + "value": "LICENSE_SERVICE", + "name": "LICENSE_SERVICE" + }, + { + "value": "CUSTOM", + "name": "CUSTOM" } - } + ] } }, "com.amazonaws.nimble#StudioComponentTypeList": { @@ -8214,23 +7505,19 @@ } }, "com.amazonaws.nimble#StudioEncryptionConfigurationKeyType": { - "type": "enum", - "members": { - "AWS_OWNED_KEY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AWS_OWNED_KEY" - } - }, - "CUSTOMER_MANAGED_KEY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CUSTOMER_MANAGED_KEY" - } - } - }, + "type": "string", "traits": { - "smithy.api#documentation": "The type of KMS key that is used to encrypt studio data.
" + "smithy.api#documentation": "The type of KMS key that is used to encrypt studio data.
", + "smithy.api#enum": [ + { + "value": "AWS_OWNED_KEY", + "name": "AWS_OWNED_KEY" + }, + { + "value": "CUSTOMER_MANAGED_KEY", + "name": "CUSTOMER_MANAGED_KEY" + } + ] } }, "com.amazonaws.nimble#StudioList": { @@ -8268,7 +7555,7 @@ } }, "traits": { - "smithy.api#documentation": "A studio member is an association of a user from your studio identity source to\n elevated permissions that they are granted in the studio.
\nWhen you add a user to your studio using the Nimble Studio console, they are\n given access to the studio's IAM Identity Center application and are given access to log\n in to the Nimble Studio portal. These users have the permissions provided by the\n studio's user IAM role and do not appear in the studio membership\n collection. Only studio admins appear in studio membership.
\nWhen you add a user to studio membership with the ADMIN persona, upon logging in to\n the Nimble Studio portal, they are granted permissions specified by the Studio's\n Admin IAM role.
" + "smithy.api#documentation": "A studio member is an association of a user from your studio identity source to\n elevated permissions that they are granted in the studio.
\nWhen you add a user to your studio using the Nimble Studio console, they are\n given access to the studio's IAM Identity Center application and are given access to log in to the\n Nimble Studio portal. These users have the permissions provided by the studio's\n user IAM role and do not appear in the studio membership collection. Only studio admins\n appear in studio membership.
\nWhen you add a user to studio membership with the persona ADMIN, upon logging in to\n the Nimble Studio portal, they are granted permissions specified by the Studio's\n Admin IAM role.
" } }, "com.amazonaws.nimble#StudioMembershipList": { @@ -8294,14 +7581,14 @@ } }, "com.amazonaws.nimble#StudioPersona": { - "type": "enum", - "members": { - "ADMINISTRATOR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ADMINISTRATOR" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ADMINISTRATOR", + "name": "ADMINISTRATOR" } - } + ] } }, "com.amazonaws.nimble#StudioResource": { @@ -8370,178 +7657,122 @@ } }, "com.amazonaws.nimble#StudioState": { - "type": "enum", - "members": { - "CREATE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CREATE_IN_PROGRESS" - } - }, - "READY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "READY" - } - }, - "UPDATE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UPDATE_IN_PROGRESS" - } - }, - "DELETE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DELETE_IN_PROGRESS" - } - }, - "DELETED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DELETED" - } - }, - "DELETE_FAILED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DELETE_FAILED" - } - }, - "CREATE_FAILED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CREATE_FAILED" - } - }, - "UPDATE_FAILED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UPDATE_FAILED" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CREATE_IN_PROGRESS", + "name": "CREATE_IN_PROGRESS" + }, + { + "value": "READY", + "name": "READY" + }, + { + "value": "UPDATE_IN_PROGRESS", + "name": "UPDATE_IN_PROGRESS" + }, + { + "value": "DELETE_IN_PROGRESS", + "name": "DELETE_IN_PROGRESS" + }, + { + "value": "DELETED", + "name": "DELETED" + }, + { + "value": "DELETE_FAILED", + "name": "DELETE_FAILED" + }, + { + "value": "CREATE_FAILED", + "name": "CREATE_FAILED" + }, + { + "value": "UPDATE_FAILED", + "name": "UPDATE_FAILED" } - } + ] } }, "com.amazonaws.nimble#StudioStatusCode": { - "type": "enum", - "members": { - "STUDIO_CREATED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STUDIO_CREATED" - } - }, - "STUDIO_DELETED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STUDIO_DELETED" - } - }, - "STUDIO_UPDATED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STUDIO_UPDATED" - } - }, - "STUDIO_CREATE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STUDIO_CREATE_IN_PROGRESS" - } - }, - "STUDIO_UPDATE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STUDIO_UPDATE_IN_PROGRESS" - } - }, - "STUDIO_DELETE_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STUDIO_DELETE_IN_PROGRESS" - } - }, - "STUDIO_WITH_LAUNCH_PROFILES_NOT_DELETED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STUDIO_WITH_LAUNCH_PROFILES_NOT_DELETED" - } - }, - "STUDIO_WITH_STUDIO_COMPONENTS_NOT_DELETED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STUDIO_WITH_STUDIO_COMPONENTS_NOT_DELETED" - } - }, - "STUDIO_WITH_STREAMING_IMAGES_NOT_DELETED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "STUDIO_WITH_STREAMING_IMAGES_NOT_DELETED" - } - }, - "AWS_SSO_NOT_ENABLED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AWS_SSO_NOT_ENABLED" - } - }, - 
"AWS_SSO_ACCESS_DENIED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AWS_SSO_ACCESS_DENIED" - } - }, - "ROLE_NOT_OWNED_BY_STUDIO_OWNER": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ROLE_NOT_OWNED_BY_STUDIO_OWNER" - } - }, - "ROLE_COULD_NOT_BE_ASSUMED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ROLE_COULD_NOT_BE_ASSUMED" - } - }, - "INTERNAL_ERROR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "INTERNAL_ERROR" - } - }, - "ENCRYPTION_KEY_NOT_FOUND": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ENCRYPTION_KEY_NOT_FOUND" - } - }, - "ENCRYPTION_KEY_ACCESS_DENIED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ENCRYPTION_KEY_ACCESS_DENIED" - } - }, - "AWS_SSO_CONFIGURATION_REPAIRED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AWS_SSO_CONFIGURATION_REPAIRED" - } - }, - "AWS_SSO_CONFIGURATION_REPAIR_IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AWS_SSO_CONFIGURATION_REPAIR_IN_PROGRESS" - } - }, - "AWS_STS_REGION_DISABLED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AWS_STS_REGION_DISABLED" - } - } - }, + "type": "string", "traits": { - "smithy.api#documentation": "The status code.
" + "smithy.api#documentation": "The status code.
", + "smithy.api#enum": [ + { + "value": "STUDIO_CREATED", + "name": "STUDIO_CREATED" + }, + { + "value": "STUDIO_DELETED", + "name": "STUDIO_DELETED" + }, + { + "value": "STUDIO_UPDATED", + "name": "STUDIO_UPDATED" + }, + { + "value": "STUDIO_CREATE_IN_PROGRESS", + "name": "STUDIO_CREATE_IN_PROGRESS" + }, + { + "value": "STUDIO_UPDATE_IN_PROGRESS", + "name": "STUDIO_UPDATE_IN_PROGRESS" + }, + { + "value": "STUDIO_DELETE_IN_PROGRESS", + "name": "STUDIO_DELETE_IN_PROGRESS" + }, + { + "value": "STUDIO_WITH_LAUNCH_PROFILES_NOT_DELETED", + "name": "STUDIO_WITH_LAUNCH_PROFILES_NOT_DELETED" + }, + { + "value": "STUDIO_WITH_STUDIO_COMPONENTS_NOT_DELETED", + "name": "STUDIO_WITH_STUDIO_COMPONENTS_NOT_DELETED" + }, + { + "value": "STUDIO_WITH_STREAMING_IMAGES_NOT_DELETED", + "name": "STUDIO_WITH_STREAMING_IMAGES_NOT_DELETED" + }, + { + "value": "AWS_SSO_NOT_ENABLED", + "name": "AWS_SSO_NOT_ENABLED" + }, + { + "value": "AWS_SSO_ACCESS_DENIED", + "name": "AWS_SSO_ACCESS_DENIED" + }, + { + "value": "ROLE_NOT_OWNED_BY_STUDIO_OWNER", + "name": "ROLE_NOT_OWNED_BY_STUDIO_OWNER" + }, + { + "value": "ROLE_COULD_NOT_BE_ASSUMED", + "name": "ROLE_COULD_NOT_BE_ASSUMED" + }, + { + "value": "INTERNAL_ERROR", + "name": "INTERNAL_ERROR" + }, + { + "value": "ENCRYPTION_KEY_NOT_FOUND", + "name": "ENCRYPTION_KEY_NOT_FOUND" + }, + { + "value": "ENCRYPTION_KEY_ACCESS_DENIED", + "name": "ENCRYPTION_KEY_ACCESS_DENIED" + }, + { + "value": "AWS_SSO_CONFIGURATION_REPAIRED", + "name": "AWS_SSO_CONFIGURATION_REPAIRED" + }, + { + "value": "AWS_SSO_CONFIGURATION_REPAIR_IN_PROGRESS", + "name": "AWS_SSO_CONFIGURATION_REPAIR_IN_PROGRESS" + } + ] } }, "com.amazonaws.nimble#TagResource": { @@ -8591,7 +7822,7 @@ "resourceArn": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource you want to add tags to.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource you want to add tags to.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -8599,7 +7830,7 @@ "tags": { "target": "com.amazonaws.nimble#Tags", "traits": { - "smithy.api#documentation": "A collection of labels, in the form of key-value pairs, that apply to this\n resource.
" + "smithy.api#documentation": "A collection of labels, in the form of key:value pairs, that apply to this\n resource.
" } } }, @@ -8890,7 +8121,7 @@ "clientToken": { "target": "com.amazonaws.nimble#ClientToken", "traits": { - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -8898,7 +8129,7 @@ "launchProfileId": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The ID of the launch profile used to control access from the streaming session.
", + "smithy.api#documentation": "The Launch Profile ID.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -8952,7 +8183,7 @@ "target": "com.amazonaws.nimble#ClientToken", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -8966,7 +8197,7 @@ "launchProfileId": { "target": "com.amazonaws.nimble#String", "traits": { - "smithy.api#documentation": "The ID of the launch profile used to control access from the streaming session.
", + "smithy.api#documentation": "The Launch Profile ID.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -9071,7 +8302,7 @@ "target": "com.amazonaws.nimble#ClientToken", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -9218,7 +8449,7 @@ "target": "com.amazonaws.nimble#ClientToken", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -9291,14 +8522,14 @@ "target": "com.amazonaws.nimble#RoleArn", "traits": { "aws.cloudformation#cfnMutability": "full", - "smithy.api#documentation": "An IAM role attached to Studio Component when the system initialization\n script runs which give the studio component access to Amazon Web Services resources when\n the system initialization script runs.
" + "smithy.api#documentation": "An IAM role attached to Studio Component when the system initialization script runs which give the studio component access to AWS resources when the system initialization script runs.
" } }, "runtimeRoleArn": { "target": "com.amazonaws.nimble#RoleArn", "traits": { "aws.cloudformation#cfnMutability": "full", - "smithy.api#documentation": "An IAM role attached to a Studio Component that gives the studio\n component access to Amazon Web Services resources at anytime while the instance is\n running.
" + "smithy.api#documentation": "An IAM role attached to a Studio Component that gives the studio component access to AWS resources at anytime while the instance is running.
" } } }, @@ -9326,14 +8557,14 @@ "adminRoleArn": { "target": "com.amazonaws.nimble#RoleArn", "traits": { - "smithy.api#documentation": "The IAM role that Studio Admins will assume when logging in to the\n Nimble Studio portal.
" + "smithy.api#documentation": "The IAM role that Studio Admins will assume when logging in to the Nimble Studio\n portal.
" } }, "clientToken": { "target": "com.amazonaws.nimble#ClientToken", "traits": { "aws.cloudformation#cfnExcludeProperty": {}, - "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the Amazon Web Services SDK automatically\n generates a client token and uses it for the request to ensure idempotency.
", + "smithy.api#documentation": "Unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. If you don’t specify a client token, the AWS SDK automatically generates a\n client token and uses it for the request to ensure idempotency.
", "smithy.api#httpHeader": "X-Amz-Client-Token", "smithy.api#idempotencyToken": {} } @@ -9355,7 +8586,7 @@ "userRoleArn": { "target": "com.amazonaws.nimble#RoleArn", "traits": { - "smithy.api#documentation": "The IAM role that Studio Users will assume when logging in to the\n Nimble Studio portal.
" + "smithy.api#documentation": "The IAM role that Studio Users will assume when logging in to the Nimble Studio\n portal.
" } } }, @@ -9448,82 +8679,6 @@ "target": "com.amazonaws.nimble#ValidationResult" } }, - "com.amazonaws.nimble#VolumeConfiguration": { - "type": "structure", - "members": { - "size": { - "target": "com.amazonaws.nimble#VolumeSizeInGiB", - "traits": { - "smithy.api#default": 500, - "smithy.api#documentation": "The size of the root volume that is attached to the streaming session. The root volume\n size is measured in GiBs.
" - } - }, - "throughput": { - "target": "com.amazonaws.nimble#VolumeThroughputInMiBs", - "traits": { - "smithy.api#default": 125, - "smithy.api#documentation": "The throughput to provision for the root volume that is attached to the streaming\n session. The throughput is measured in MiB/s.
" - } - }, - "iops": { - "target": "com.amazonaws.nimble#VolumeIops", - "traits": { - "smithy.api#default": 3000, - "smithy.api#documentation": "The number of I/O operations per second for the root volume that is attached to\n streaming session.
" - } - } - }, - "traits": { - "smithy.api#documentation": "Custom volume configuration for the root volumes that are attached to streaming\n sessions.
\nThis parameter is only allowed when sessionPersistenceMode
is\n ACTIVATED
.
Tags to assign to the blue/green deployment.
" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.rds#CreateBlueGreenDeploymentResponse": { @@ -4434,6 +4437,9 @@ "BlueGreenDeployment": { "target": "com.amazonaws.rds#BlueGreenDeployment" } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.rds#CreateCustomDBEngineVersion": { @@ -5448,7 +5454,7 @@ "EnableCustomerOwnedIp": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "A value that indicates whether to enable a customer-owned IP address (CoIP) for an RDS\n on Outposts DB instance.
\nA CoIP provides local or external connectivity to resources in\n your Outpost subnets through your on-premises network. For some use cases, a CoIP can\n provide lower latency for connections to the DB instance from outside of its virtual\n private cloud (VPC) on your local network.
\nFor more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts \n in the Amazon RDS User Guide.
\nFor more information about CoIPs, see Customer-owned IP addresses \n in the Amazon Web Services Outposts User Guide.
" + "smithy.api#documentation": "A value that indicates whether to enable a customer-owned IP address (CoIP) for an RDS\n on Outposts DB instance.
\nA CoIP provides local or external connectivity to resources in\n your Outpost subnets through your on-premises network. For some use cases, a CoIP can\n provide lower latency for connections to the DB instance from outside of its virtual\n private cloud (VPC) on your local network.
\nFor more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts \n in the Amazon RDS User Guide.
\nFor more information about CoIPs, see Customer-owned IP addresses \n in the Amazon Web Services Outposts User Guide.
" } }, "CustomIamInstanceProfile": { @@ -5765,12 +5771,6 @@ "traits": { "smithy.api#documentation": "Specifies the storage throughput value for the read replica.
\nThis setting doesn't apply to RDS Custom or Amazon Aurora.
" } - }, - "EnableCustomerOwnedIp": { - "target": "com.amazonaws.rds#BooleanOptional", - "traits": { - "smithy.api#documentation": "A value that indicates whether to enable a customer-owned IP address (CoIP) for an RDS\n on Outposts read replica.
\nA CoIP provides local or external connectivity to resources in\n your Outpost subnets through your on-premises network. For some use cases, a CoIP can\n provide lower latency for connections to the read replica from outside of its virtual\n private cloud (VPC) on your local network.
\nFor more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts \n in the Amazon RDS User Guide.
\nFor more information about CoIPs, see Customer-owned IP addresses \n in the Amazon Web Services Outposts User Guide.
" - } } } }, @@ -5944,6 +5944,9 @@ "Tags": { "target": "com.amazonaws.rds#TagList" } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.rds#CreateDBProxyEndpointResponse": { @@ -5955,6 +5958,9 @@ "smithy.api#documentation": "The DBProxyEndpoint
object that is created by the API operation.\n The DB proxy endpoint that you create might provide capabilities such as read/write\n or read-only operations, or using a different VPC than the proxy's default VPC.
An optional set of key-value pairs to associate arbitrary data of your choosing with the proxy.
" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.rds#CreateDBProxyResponse": { @@ -6038,6 +6047,9 @@ "smithy.api#documentation": "The DBProxy
structure corresponding to the new proxy.
Specifies whether a customer-owned IP address (CoIP) is enabled for an RDS on Outposts DB instance.
\nA CoIP provides local or external connectivity to resources in\n your Outpost subnets through your on-premises network. For some use cases, a CoIP can\n provide lower latency for connections to the DB instance from outside of its virtual\n private cloud (VPC) on your local network.
\nFor more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts \n in the Amazon RDS User Guide.
\nFor more information about CoIPs, see Customer-owned IP addresses \n in the Amazon Web Services Outposts User Guide.
" + "smithy.api#documentation": "Specifies whether a customer-owned IP address (CoIP) is enabled for an RDS on Outposts DB instance.
\nA CoIP provides local or external connectivity to resources in\n your Outpost subnets through your on-premises network. For some use cases, a CoIP can\n provide lower latency for connections to the DB instance from outside of its virtual\n private cloud (VPC) on your local network.
\nFor more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts \n in the Amazon RDS User Guide.
\nFor more information about CoIPs, see Customer-owned IP addresses \n in the Amazon Web Services Outposts User Guide.
" } }, "AwsBackupRecoveryPointArn": { @@ -10578,6 +10590,9 @@ "smithy.api#documentation": "A value that indicates whether to delete the resources in the green environment.
" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.rds#DeleteBlueGreenDeploymentResponse": { @@ -10586,6 +10601,9 @@ "BlueGreenDeployment": { "target": "com.amazonaws.rds#BlueGreenDeployment" } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.rds#DeleteCustomDBEngineVersion": { @@ -11012,6 +11030,9 @@ "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.rds#DeleteDBProxyEndpointResponse": { @@ -11023,6 +11044,9 @@ "smithy.api#documentation": "The data structure representing the details of the DB proxy endpoint that you delete.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.rds#DeleteDBProxyRequest": { @@ -11035,6 +11059,9 @@ "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.rds#DeleteDBProxyResponse": { @@ -11046,6 +11073,9 @@ "smithy.api#documentation": "The data structure representing the details of the DB proxy that you delete.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.rds#DeleteDBSecurityGroup": { @@ -11336,11 +11366,17 @@ "smithy.api#documentation": "One or more DB cluster identifiers.
" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.rds#DeregisterDBProxyTargetsResponse": { "type": "structure", - "members": {} + "members": {}, + "traits": { + "smithy.api#output": {} + } }, "com.amazonaws.rds#DescribeAccountAttributes": { "type": "operation", @@ -11411,6 +11447,9 @@ "smithy.api#documentation": "The maximum number of records to include in the response.\n If more records exist than the specified MaxRecords
value,\n a pagination token called a marker is included in the response so you can retrieve the remaining results.
Default: 100
\nConstraints: Minimum 20, maximum 100.
" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.rds#DescribeBlueGreenDeploymentsResponse": { @@ -11428,6 +11467,9 @@ "smithy.api#documentation": "A pagination token that can be used in a later DescribeBlueGreenDeployments request.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.rds#DescribeCertificates": { @@ -12631,7 +12673,8 @@ } }, "traits": { - "smithy.api#documentation": "The response from a call to DescribeDBLogFiles
.
The response from a call to DescribeDBLogFiles
.
The maximum number of records to include in the response. If more records exist\n than the specified MaxRecords
value, a pagination token called a marker is\n included in the response so that the remaining results can be retrieved.
Default: 100
\nConstraints: Minimum 20, maximum 100.
" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.rds#DescribeDBProxiesResponse": { @@ -12815,6 +12861,9 @@ "smithy.api#documentation": "An optional pagination token provided by a previous request.\n If this parameter is specified, the response includes only records beyond the marker,\n up to the value specified by MaxRecords
.
The maximum number of records to include in the response. If more records exist\n than the specified MaxRecords
value, a pagination token called a marker is\n included in the response so that the remaining results can be retrieved.
Default: 100
\nConstraints: Minimum 20, maximum 100.
" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.rds#DescribeDBProxyEndpointsResponse": { @@ -12893,6 +12945,9 @@ "smithy.api#documentation": "An optional pagination token provided by a previous request.\n If this parameter is specified, the response includes only records beyond the marker,\n up to the value specified by MaxRecords
.
The maximum number of records to include in the response.\n If more records exist than the specified MaxRecords
value,\n a pagination token called a marker is included in the response so that the remaining\n results can be retrieved.
Default: 100
\nConstraints: Minimum 20, maximum 100.
" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.rds#DescribeDBProxyTargetGroupsResponse": { @@ -12975,6 +13033,9 @@ "smithy.api#documentation": "An optional pagination token provided by a previous request.\n If this parameter is specified, the response includes only records beyond the marker,\n up to the value specified by MaxRecords
.
The maximum number of records to include in the response.\n If more records exist than the specified MaxRecords
value,\n a pagination token called a marker is included in the response so that the remaining\n results can be retrieved.
Default: 100
\nConstraints: Minimum 20, maximum 100.
" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.rds#DescribeDBProxyTargetsResponse": { @@ -13060,6 +13124,9 @@ "smithy.api#documentation": "An optional pagination token provided by a previous request.\n If this parameter is specified, the response includes only records beyond the marker,\n up to the value specified by MaxRecords
.
The audit policy state. When a policy is unlocked, it is read/write. When it is locked, it is\n read-only. You can edit your audit policy only when the activity stream is unlocked or stopped.
" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.rds#ModifyActivityStreamResponse": { @@ -16347,6 +16417,9 @@ "smithy.api#documentation": "The status of the modification to the policy state of the database activity stream.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.rds#ModifyCertificates": { @@ -17036,7 +17109,7 @@ "DBInstanceClass": { "target": "com.amazonaws.rds#String", "traits": { - "smithy.api#documentation": "The new compute and memory capacity of the DB instance, for example db.m4.large. Not all DB instance classes are available in all\n Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and availability for your engine, see DB Instance Class in the \n Amazon RDS User Guide or \n Aurora\n DB instance classes in the Amazon Aurora User Guide. For RDS Custom, see DB instance class support for RDS Custom for Oracle and \n DB instance class support for RDS Custom for SQL Server.
\nIf you modify the DB instance class, an outage occurs during the change. The change is\n applied during the next maintenance window, unless you specify\n ApplyImmediately
in your request.
Default: Uses existing setting
" + "smithy.api#documentation": "The new compute and memory capacity of the DB instance, for example db.m4.large. Not all DB instance classes are available in all\n Amazon Web Services Regions, or for all database engines. For the full list of DB instance classes, and \n availability for your engine, see\n DB instance \n classes in the Amazon RDS User Guide or \n Aurora \n DB instance classes in the Amazon Aurora User Guide.
\nIf you modify the DB instance class, an outage occurs during the change. The change is applied during the next maintenance window,\n unless ApplyImmediately
is enabled for this request.
This setting doesn't apply to RDS Custom for Oracle.
\nDefault: Uses existing setting
" } }, "DBSubnetGroupName": { @@ -17284,7 +17357,7 @@ "EnableCustomerOwnedIp": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "A value that indicates whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance.
\nA CoIP provides local or external connectivity to resources in\n your Outpost subnets through your on-premises network. For some use cases, a CoIP can\n provide lower latency for connections to the DB instance from outside of its virtual\n private cloud (VPC) on your local network.
\nFor more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts \n in the Amazon RDS User Guide.
\nFor more information about CoIPs, see Customer-owned IP addresses \n in the Amazon Web Services Outposts User Guide.
" + "smithy.api#documentation": "A value that indicates whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance.
\nA CoIP provides local or external connectivity to resources in\n your Outpost subnets through your on-premises network. For some use cases, a CoIP can\n provide lower latency for connections to the DB instance from outside of its virtual\n private cloud (VPC) on your local network.
\nFor more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts \n in the Amazon RDS User Guide.
\nFor more information about CoIPs, see Customer-owned IP addresses \n in the Amazon Web Services Outposts User Guide.
" } }, "AwsBackupRecoveryPointArn": { @@ -17443,6 +17516,9 @@ "smithy.api#documentation": "The VPC security group IDs for the DB proxy endpoint. When the DB proxy endpoint\n uses a different VPC than the original proxy, you also specify a different\n set of security group IDs than for the original proxy.
" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.rds#ModifyDBProxyEndpointResponse": { @@ -17454,6 +17530,9 @@ "smithy.api#documentation": "The DBProxyEndpoint
object representing the new settings for the DB proxy endpoint.
The new list of security groups for the DBProxy
.
The DBProxy
object representing the new settings for the proxy.
The new name for the modified DBProxyTarget
. An identifier must begin with a letter and must contain only ASCII letters, digits, and hyphens; it can't end with a hyphen or contain two consecutive hyphens.
The settings of the modified DBProxyTarget
.
One or more DB cluster identifiers.
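A hedged sketch of registering a cluster as a proxy target, assuming a generated registerDbProxyTargets operation whose members mirror the model (dbProxyName, dbClusterIdentifiers); the identifiers are hypothetical.

```kotlin
import aws.sdk.kotlin.services.rds.RdsClient
import kotlinx.coroutines.runBlocking

// Sketch: register an Aurora cluster with an existing DB proxy and print the created targets.
fun main() = runBlocking {
    RdsClient { region = "us-east-1" }.use { rds ->
        val resp = rds.registerDbProxyTargets {
            dbProxyName = "example-proxy"                           // hypothetical proxy
            dbClusterIdentifiers = listOf("example-aurora-cluster") // one or more cluster identifiers
        }
        resp.dbProxyTargets?.forEach { println("${it.type}: ${it.endpoint}") }
    }
}
```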
" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.rds#RegisterDBProxyTargetsResponse": { @@ -19722,6 +19816,9 @@ "smithy.api#documentation": "One or more DBProxyTarget
objects that are created when you register targets with a target group.
A value that indicates whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance.
\nA CoIP provides local or external connectivity to resources in\n your Outpost subnets through your on-premises network. For some use cases, a CoIP can\n provide lower latency for connections to the DB instance from outside of its virtual\n private cloud (VPC) on your local network.
\nThis setting doesn't apply to RDS Custom.
\nFor more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts \n in the Amazon RDS User Guide.
\nFor more information about CoIPs, see Customer-owned IP addresses \n in the Amazon Web Services Outposts User Guide.
" + "smithy.api#documentation": "A value that indicates whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance.
\nA CoIP provides local or external connectivity to resources in\n your Outpost subnets through your on-premises network. For some use cases, a CoIP can\n provide lower latency for connections to the DB instance from outside of its virtual\n private cloud (VPC) on your local network.
\nThis setting doesn't apply to RDS Custom.
\nFor more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts \n in the Amazon RDS User Guide.
\nFor more information about CoIPs, see Customer-owned IP addresses \n in the Amazon Web Services Outposts User Guide.
" } }, "CustomIamInstanceProfile": { @@ -22142,7 +22239,7 @@ "EnableCustomerOwnedIp": { "target": "com.amazonaws.rds#BooleanOptional", "traits": { - "smithy.api#documentation": "A value that indicates whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance.
\nA CoIP provides local or external connectivity to resources in\n your Outpost subnets through your on-premises network. For some use cases, a CoIP can\n provide lower latency for connections to the DB instance from outside of its virtual\n private cloud (VPC) on your local network.
\nThis setting doesn't apply to RDS Custom.
\nFor more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts \n in the Amazon RDS User Guide.
\nFor more information about CoIPs, see Customer-owned IP addresses \n in the Amazon Web Services Outposts User Guide.
" + "smithy.api#documentation": "A value that indicates whether to enable a customer-owned IP address (CoIP) for an RDS on Outposts DB instance.
\nA CoIP provides local or external connectivity to resources in\n your Outpost subnets through your on-premises network. For some use cases, a CoIP can\n provide lower latency for connections to the DB instance from outside of its virtual\n private cloud (VPC) on your local network.
\nThis setting doesn't apply to RDS Custom.
\nFor more information about RDS on Outposts, see Working with Amazon RDS on Amazon Web Services Outposts \n in the Amazon RDS User Guide.
\nFor more information about CoIPs, see Customer-owned IP addresses \n in the Amazon Web Services Outposts User Guide.
" } }, "CustomIamInstanceProfile": { @@ -22734,6 +22831,9 @@ "smithy.api#documentation": "Specifies whether the database activity stream includes engine-native audit fields. This option only applies\n to an Oracle DB instance. By default, no engine-native audit fields are included.
" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.rds#StartActivityStreamResponse": { @@ -22776,6 +22876,9 @@ "smithy.api#documentation": "Indicates whether engine-native audit fields are included in the database activity stream.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.rds#StartDBCluster": { @@ -23096,6 +23199,9 @@ "smithy.api#documentation": "Specifies whether or not the database activity stream is to stop as soon as possible, \n regardless of the maintenance window for the database.
" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.rds#StopActivityStreamResponse": { @@ -23119,6 +23225,9 @@ "smithy.api#documentation": "The status of the database activity stream.
" } } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.rds#StopDBCluster": { @@ -23481,6 +23590,9 @@ "smithy.api#documentation": "The amount of time, in seconds, for the switchover to complete. The default is 300.
\nIf the switchover takes longer than the specified duration, then any changes are rolled back, \n and no changes are made to the environments.
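A usage sketch for the switchover timeout described above, assuming the generated switchoverBlueGreenDeployment operation; the deployment identifier is hypothetical.

```kotlin
import aws.sdk.kotlin.services.rds.RdsClient
import kotlinx.coroutines.runBlocking

// Sketch: switch over a blue/green deployment, allowing up to 10 minutes before rollback.
fun main() = runBlocking {
    RdsClient { region = "us-east-1" }.use { rds ->
        val resp = rds.switchoverBlueGreenDeployment {
            blueGreenDeploymentIdentifier = "bgd-example1234" // hypothetical identifier
            switchoverTimeout = 600                           // seconds; the default is 300
        }
        println(resp.blueGreenDeployment?.status)
    }
}
```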
" } } + }, + "traits": { + "smithy.api#input": {} } }, "com.amazonaws.rds#SwitchoverBlueGreenDeploymentResponse": { @@ -23489,6 +23601,9 @@ "BlueGreenDeployment": { "target": "com.amazonaws.rds#BlueGreenDeployment" } + }, + "traits": { + "smithy.api#output": {} } }, "com.amazonaws.rds#SwitchoverDetail": { diff --git a/codegen/sdk/aws-models/redshift-data.json b/codegen/sdk/aws-models/redshift-data.json index 95c9153a912..4f75db88322 100644 --- a/codegen/sdk/aws-models/redshift-data.json +++ b/codegen/sdk/aws-models/redshift-data.json @@ -2204,4 +2204,4 @@ } } } -} +} \ No newline at end of file diff --git a/codegen/sdk/aws-models/redshift-serverless.json b/codegen/sdk/aws-models/redshift-serverless.json index ef71f872569..363214f5453 100644 --- a/codegen/sdk/aws-models/redshift-serverless.json +++ b/codegen/sdk/aws-models/redshift-serverless.json @@ -5364,4 +5364,4 @@ } } } -} +} \ No newline at end of file diff --git a/codegen/sdk/aws-models/resiliencehub.json b/codegen/sdk/aws-models/resiliencehub.json index 85f1a4b4b20..f44dcab9dbf 100644 --- a/codegen/sdk/aws-models/resiliencehub.json +++ b/codegen/sdk/aws-models/resiliencehub.json @@ -7242,4 +7242,4 @@ } } } -} +} \ No newline at end of file diff --git a/codegen/sdk/aws-models/resource-explorer-2.json b/codegen/sdk/aws-models/resource-explorer-2.json index d4def561052..1e36e339d81 100644 --- a/codegen/sdk/aws-models/resource-explorer-2.json +++ b/codegen/sdk/aws-models/resource-explorer-2.json @@ -260,7 +260,7 @@ } ], "traits": { - "smithy.api#documentation": "Turns on Amazon Web Services Resource Explorer in the Amazon Web Services Region in which you called this operation by creating\n an index. Resource Explorer begins discovering the resources in this Region and stores the details\n about the resources in the index so that they can be queried by using the Search operation. You can create only one index in a Region.
\nThis operation creates only a local index. To promote the\n local index in one Amazon Web Services Region into the aggregator index for the Amazon Web Services account, use the\n UpdateIndexType operation. For more information, see Turning on cross-Region search by creating an aggregator index in the\n Amazon Web Services Resource Explorer User Guide.
\nFor more details about what happens when you turn on Resource Explorer in an Amazon Web Services Region, see\n Turn\n on Resource Explorer to index your resources in an Amazon Web Services Region in the\n Amazon Web Services Resource Explorer User Guide.
\nIf this is the first Amazon Web Services Region in which you've created an index for Resource Explorer, then\n this operation also creates a\n service-linked role in your Amazon Web Services account that allows Resource Explorer to enumerate\n your resources to populate the index.
\n\n Action:\n resource-explorer-2:CreateIndex
\n
\n Resource: The ARN of the index (as it will\n exist after the operation completes) in the Amazon Web Services Region and account in which\n you're trying to create the index. Use the wildcard character (*
)\n at the end of the string to match the eventual UUID. For example, the following\n Resource
element restricts the role or user to creating an\n index in only the us-east-2
Region of the specified account.
\n \"Resource\":\n \"arn:aws:resource-explorer-2:us-west-2:<account-id>:index/*\"
\n
Alternatively, you can use \"Resource\": \"*\"
to allow the role or\n user to create an index in any Region.
\n Action:\n iam:CreateServiceLinkedRole
\n
\n Resource: No specific resource (*).
\nThis permission is required only the first time you create an index to turn on\n Resource Explorer in the account. Resource Explorer uses this to create the service-linked\n role needed to index the resources in your account. Resource Explorer uses the\n same service-linked role for all additional indexes you create\n afterwards.
\nTurns on Amazon Web Services Resource Explorer in the Amazon Web Services Region in which you called this operation by creating\n an index. Resource Explorer begins discovering the resources in this Region and stores the details\n about the resources in the index so that they can be queried by using the Search operation. You can create only one index in a Region.
\nThis operation creates only a local index. To promote the\n local index in one Amazon Web Services Region into the aggregator index for the Amazon Web Services account, use the\n UpdateIndexType operation. For more information, see Turning on cross-Region search by creating an aggregator index in the\n Amazon Web Services Resource Explorer User Guide.
\nFor more details about what happens when you turn on Resource Explorer in an Amazon Web Services Region, see\n Turn\n on Resource Explorer to index your resources in an Amazon Web Services Region in the\n Amazon Web Services Resource Explorer User Guide.
\nIf this is the first Amazon Web Services Region in which you've created an index for Resource Explorer, then\n this operation also creates a\n service-linked role in your Amazon Web Services account that allows Resource Explorer to enumerate\n your resources to populate the index.
\n\n Action:\n resource-explorer-2:CreateIndex
\n
\n Resource: The ARN of the index (as it will\n exist after the operation completes) in the Amazon Web Services Region and account in which\n you're trying to create the index. Use the wildcard character (*
)\n at the end of the string to match the eventual UUID. For example, the following\n Resource
element restricts the role or user to creating an\n index in only the us-east-2
Region of the specified account.
\n \"Resource\":\n \"arn:aws:resource-explorer-2:us-west-2:<account-id>:index/*\"
\n
Alternatively, you can use \"Resource\": \"*\"
to allow the role or\n user to create an index in any Region.
\n Action:\n iam:CreateServiceLinkedRole
\n
\n Resource: No specific resource (*).
\nThis permission is required only the first time you create an index to turn on\n Resource Explorer in the account. Resource Explorer uses this to create the service-linked\n role needed to index the resources in your account. Resource Explorer uses the\n same service-linked role for all additional indexes you create\n afterwards.
\nCreates a view that users can query by using the Search operation.\n Results from queries that you make using this view include only resources that match the\n view's Filters
. For more information about Amazon Web Services Resource Explorer views, see Managing views\n in the Amazon Web Services Resource Explorer User Guide.
Only the principals with an IAM identity-based policy that grants Allow
\n to the Search
action on a Resource
with the Amazon resource name (ARN) of\n this view can Search using views you create with this\n operation.
Creates a view that users can query by using the Search operation.\n Results from queries that you make using this view include only resources that match the\n view's Filters
. For more information about Amazon Web Services Resource Explorer views, see Managing views\n in the Amazon Web Services Resource Explorer User Guide.
Only the principals with an IAM identity-based policy that grants Allow
\n to the Search
action on a Resource
with the Amazon resource name (ARN) of\n this view can Search using views you create with this\n operation.
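To make the CreateIndex/CreateView flow documented above concrete, a hedged sketch assuming a generated ResourceExplorer2Client whose operation and member names mirror this model, and assuming the caller holds the resource-explorer-2:CreateIndex and iam:CreateServiceLinkedRole permissions listed earlier; the view name and query are illustrative.

```kotlin
import aws.sdk.kotlin.services.resourceexplorer2.ResourceExplorer2Client
import kotlinx.coroutines.runBlocking

// Sketch: turn on Resource Explorer in this Region, create a view, and search through it.
fun main() = runBlocking {
    ResourceExplorer2Client { region = "us-east-2" }.use { explorer ->
        explorer.createIndex { }                 // at most one index per Region
        val created = explorer.createView {
            viewName = "example-view"            // hypothetical view name
        }
        val results = explorer.search {
            viewArn = created.view?.viewArn      // principals need Allow on Search for this ARN
            queryString = "service:s3"           // example query
        }
        results.resources?.forEach { println(it.arn) }
    }
}
```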
Deletes the specified index and turns off Amazon Web Services Resource Explorer in the specified Amazon Web Services Region.\n When you delete an index, Resource Explorer stops discovering and indexing resources in that\n Region. Resource Explorer also deletes all views in that Region. These actions occur as\n asynchronous background tasks. You can check to see when the actions are complete by\n using the GetIndex operation and checking the Status
\n response value.
If the index you delete is the aggregator index for the Amazon Web Services account, you must\n wait 24 hours before you can promote another local index to be the\n aggregator index for the account. Users can't perform account-wide searches using\n Resource Explorer until another aggregator index is configured.
\nDeletes the specified index and turns off Amazon Web Services Resource Explorer in the specified Amazon Web Services Region.\n When you delete an index, Resource Explorer stops discovering and indexing resources in that\n Region. Resource Explorer also deletes all views in that Region. These actions occur as\n asynchronous background tasks. You can check to see when the actions are complete by\n using the GetIndex operation and checking the Status
\n response value.
The type of the index in this Region. For information about the aggregator index and\n how it differs from a local index, see Turning on cross-Region search\n by creating an aggregator index.
" + "smithy.api#documentation": "Specifies the type of the index in this Region. For information about the aggregator\n index and how it differs from a local index, see Turning on cross-Region search\n by creating an aggregator index.
" } }, "State": { "target": "com.amazonaws.resourceexplorer2#IndexState", "traits": { - "smithy.api#documentation": "The current state of the index in this Amazon Web Services Region.
" + "smithy.api#documentation": "Indicates the current state of the index in this Amazon Web Services Region.
" } }, "ReplicatingFrom": { "target": "com.amazonaws.resourceexplorer2#RegionList", "traits": { - "smithy.api#documentation": "This response value is present only if this index is\n Type=AGGREGATOR
.
A list of the Amazon Web Services Regions that replicate their content to the index in this\n Region.
" + "smithy.api#documentation": "If this index is Type=AGGREGATOR
, then this response value contains a\n list of the Amazon Web Services Regions that replicate their content to the index in this Region. Not\n present for a local index.
This response value is present only if this index is Type=LOCAL
.
The Amazon Web Services Region that contains the aggregator index, if one exists. If an aggregator index\n does exist then the Region in which you called this operation replicates its index\n information to the Region specified in this response value.
" + "smithy.api#documentation": "Identifies the Amazon Web Services Region that has an index set to Type=AGGREGATOR
, if\n one exists. If it does, then the Region you called this operation in replicates its\n index information to the Region specified in this response value. Not present if there\n isn't an aggregator index in the account.
Amazon Web Services Resource Explorer is a resource search and discovery service. By using Resource Explorer, you can\n explore your resources using an internet search engine-like experience. Examples of\n resources include Amazon Relational Database Service (Amazon RDS) instances, Amazon Simple Storage Service (Amazon S3) buckets, or Amazon DynamoDB\n tables. You can search for your resources using resource metadata like names, tags, and\n IDs. Resource Explorer can search across all of the Amazon Web Services Regions in your account in which you turn\n the service on, to simplify your cross-Region workloads.
\nResource Explorer scans the resources in each of the Amazon Web Services Regions in your Amazon Web Services account in which\n you turn on Resource Explorer. Resource Explorer creates\n and maintains an index in each Region, with the details of that Region's\n resources.
\nYou can search across all of the\n indexed Regions in your account by designating one of your Amazon Web Services Regions to\n contain the aggregator index for the account. When you promote a local index\n in a Region to become the aggregator index for the account, Resource Explorer automatically\n replicates the index information from all local indexes in the other Regions to the\n aggregator index. Therefore, the Region with the aggregator index has a copy of all resource\n information for all Regions in the account where you turned on Resource Explorer. As a result,\n views in the aggregator index Region include resources from all of the indexed Regions in your\n account.
\nFor more information about Amazon Web Services Resource Explorer, including how to enable and configure the\n service, see the Amazon Web Services Resource Explorer User Guide.
", + "smithy.api#documentation": "Amazon Web Services Resource Explorer is a resource search and discovery service. By using Resource Explorer, you can\n explore your resources using an internet search engine-like experience. Examples of\n resources include Amazon Relational Database Service (Amazon RDS) instances, Amazon Simple Storage Service (Amazon S3) buckets, or Amazon DynamoDB\n tables. You can search for your resources using resource metadata like names, tags, and\n IDs. Resource Explorer can search across all of the Amazon Web Services Regions in your account in which you turn\n the service on, to simplify your cross-Region workloads.
\nResource Explorer scans the resources in each of the Amazon Web Services Regions in your Amazon Web Services account in which\n you turn on Resource Explorer. Resource Explorer creates and\n maintains an index in each Region, with the details of that Region's\n resources.
\nYou can search across all of the indexed\n Regions in your account by designating one of your Amazon Web Services Regions to contain\n the aggregator index for the account. When you promote a local index in\n a Region to become the aggregator index for the account, Resource Explorer automatically\n replicates the index information from all local indexes in the other Regions to the\n aggregator index. Therefore, the Region with the aggregator index has a copy of all resource\n information for all Regions in the account where you turned on Resource Explorer. As a result,\n views in the aggregator index Region include resources from all of the indexed Regions in your\n account.
\nFor more information about Amazon Web Services Resource Explorer, including how to enable and configure the\n service, see the Amazon Web Services Resource Explorer User Guide.
", "smithy.api#title": "AWS Resource Explorer", "smithy.rules#endpointRuleSet": { "version": "1.0", "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -1413,6 +1413,15 @@ "ref": "Endpoint" } ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" } ], "type": "tree", @@ -1607,58 +1616,6 @@ }, "smithy.rules#endpointTests": { "testCases": [ - { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.ap-south-2.api.aws" - } - }, - "params": { - "Region": "ap-south-2", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.ap-south-2.api.aws" - } - }, - "params": { - "Region": "ap-south-2", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.ap-south-2.api.aws" - } - }, - "params": { - "Region": "ap-south-2", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region ap-south-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.ap-south-2.api.aws" - } - }, - "params": { - "Region": "ap-south-2", - "UseDualStack": false, - "UseFIPS": false - } - }, { "documentation": "For region ap-south-1 with FIPS enabled and DualStack enabled", "expect": { @@ -1667,9 +1624,9 @@ } }, "params": { - "Region": "ap-south-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-south-1" } }, { @@ -1680,9 +1637,9 @@ } }, "params": { - "Region": "ap-south-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-south-1" } }, { @@ -1693,9 +1650,9 @@ } }, "params": { - "Region": "ap-south-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-south-1" } }, { @@ -1706,9 +1663,9 @@ } }, "params": { - "Region": "ap-south-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-south-1" } }, { @@ -1719,9 +1676,9 @@ } }, "params": { - "Region": "eu-south-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-south-1" } }, { @@ -1732,9 +1689,9 @@ } }, "params": { - "Region": "eu-south-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-south-1" } }, { @@ -1745,9 +1702,9 @@ } }, "params": { - "Region": "eu-south-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-south-1" } }, { @@ -1758,9 +1715,9 @@ } }, "params": { - "Region": "eu-south-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-south-1" } }, { @@ -1771,9 +1728,9 @@ } }, "params": { - "Region": "eu-south-2", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-south-2" } }, { @@ -1784,9 +1741,9 @@ } }, "params": { - "Region": "eu-south-2", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-south-2" } }, { @@ -1797,9 +1754,9 @@ } }, "params": { - "Region": "eu-south-2", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-south-2" } }, { @@ -1810,61 +1767,9 @@ } }, "params": { - "Region": "eu-south-2", - "UseDualStack": false, - 
"UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-south-2" } }, { @@ -1875,9 +1780,9 @@ } }, "params": { - "Region": "me-central-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "me-central-1" } }, { @@ -1888,9 +1793,9 @@ } }, "params": { - "Region": "me-central-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "me-central-1" } }, { @@ -1901,9 +1806,9 @@ } }, "params": { - "Region": "me-central-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "me-central-1" } }, { @@ -1914,9 +1819,9 @@ } }, "params": { - "Region": "me-central-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "me-central-1" } }, { @@ -1927,9 +1832,9 @@ } }, "params": { - "Region": "ca-central-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ca-central-1" } }, { @@ -1940,9 +1845,9 @@ } }, "params": { - "Region": "ca-central-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ca-central-1" } }, { @@ -1953,9 +1858,9 @@ } }, "params": { - "Region": "ca-central-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ca-central-1" } }, { @@ -1966,9 +1871,9 @@ } }, "params": { - "Region": "ca-central-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ca-central-1" } }, { @@ -1979,9 +1884,9 @@ } }, "params": { - "Region": "eu-central-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-central-1" } }, { @@ -1992,9 +1897,9 @@ } }, "params": { - "Region": "eu-central-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-central-1" } }, { @@ -2005,9 +1910,9 @@ } }, "params": { - "Region": "eu-central-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-central-1" } }, { @@ -2018,61 +1923,9 @@ } }, "params": { - "Region": "eu-central-1", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.eu-central-2.api.aws" - } - }, - "params": { - "Region": "eu-central-2", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": 
"https://resource-explorer-2-fips.eu-central-2.api.aws" - } - }, - "params": { - "Region": "eu-central-2", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.eu-central-2.api.aws" - } - }, - "params": { - "Region": "eu-central-2", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region eu-central-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.eu-central-2.api.aws" - } - }, - "params": { - "Region": "eu-central-2", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-central-1" } }, { @@ -2083,9 +1936,9 @@ } }, "params": { - "Region": "us-west-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-west-1" } }, { @@ -2096,9 +1949,9 @@ } }, "params": { - "Region": "us-west-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-west-1" } }, { @@ -2109,9 +1962,9 @@ } }, "params": { - "Region": "us-west-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-west-1" } }, { @@ -2122,9 +1975,9 @@ } }, "params": { - "Region": "us-west-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-west-1" } }, { @@ -2135,9 +1988,9 @@ } }, "params": { - "Region": "us-west-2", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-west-2" } }, { @@ -2148,9 +2001,9 @@ } }, "params": { - "Region": "us-west-2", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-west-2" } }, { @@ -2161,9 +2014,9 @@ } }, "params": { - "Region": "us-west-2", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-west-2" } }, { @@ -2174,61 +2027,9 @@ } }, "params": { - "Region": "us-west-2", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.af-south-1.api.aws" - } - }, - "params": { - "Region": "af-south-1", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.af-south-1.api.aws" - } - }, - "params": { - "Region": "af-south-1", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.af-south-1.api.aws" - } - }, - "params": { - "Region": "af-south-1", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region af-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.af-south-1.api.aws" - } - }, - "params": { - "Region": "af-south-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-west-2" } }, { @@ -2239,9 +2040,9 @@ } }, "params": { - "Region": "eu-north-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-north-1" } }, { @@ -2252,9 +2053,9 @@ } }, "params": { - "Region": "eu-north-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-north-1" } }, { @@ -2265,9 +2066,9 @@ } }, "params": { - "Region": "eu-north-1", "UseDualStack": true, - "UseFIPS": 
false + "UseFIPS": false, + "Region": "eu-north-1" } }, { @@ -2278,9 +2079,9 @@ } }, "params": { - "Region": "eu-north-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-north-1" } }, { @@ -2291,9 +2092,9 @@ } }, "params": { - "Region": "eu-west-3", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-west-3" } }, { @@ -2304,9 +2105,9 @@ } }, "params": { - "Region": "eu-west-3", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-west-3" } }, { @@ -2317,9 +2118,9 @@ } }, "params": { - "Region": "eu-west-3", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-west-3" } }, { @@ -2330,9 +2131,9 @@ } }, "params": { - "Region": "eu-west-3", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-west-3" } }, { @@ -2343,9 +2144,9 @@ } }, "params": { - "Region": "eu-west-2", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-west-2" } }, { @@ -2356,9 +2157,9 @@ } }, "params": { - "Region": "eu-west-2", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-west-2" } }, { @@ -2369,9 +2170,9 @@ } }, "params": { - "Region": "eu-west-2", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-west-2" } }, { @@ -2382,9 +2183,9 @@ } }, "params": { - "Region": "eu-west-2", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-west-2" } }, { @@ -2395,9 +2196,9 @@ } }, "params": { - "Region": "eu-west-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-west-1" } }, { @@ -2408,9 +2209,9 @@ } }, "params": { - "Region": "eu-west-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "eu-west-1" } }, { @@ -2421,9 +2222,9 @@ } }, "params": { - "Region": "eu-west-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-west-1" } }, { @@ -2434,9 +2235,9 @@ } }, "params": { - "Region": "eu-west-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "eu-west-1" } }, { @@ -2447,9 +2248,9 @@ } }, "params": { - "Region": "ap-northeast-3", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-northeast-3" } }, { @@ -2460,9 +2261,9 @@ } }, "params": { - "Region": "ap-northeast-3", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-northeast-3" } }, { @@ -2473,9 +2274,9 @@ } }, "params": { - "Region": "ap-northeast-3", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-northeast-3" } }, { @@ -2486,9 +2287,9 @@ } }, "params": { - "Region": "ap-northeast-3", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-northeast-3" } }, { @@ -2499,9 +2300,9 @@ } }, "params": { - "Region": "ap-northeast-2", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-northeast-2" } }, { @@ -2512,9 +2313,9 @@ } }, "params": { - "Region": "ap-northeast-2", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-northeast-2" } }, { @@ -2525,9 +2326,9 @@ } }, "params": { - "Region": "ap-northeast-2", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-northeast-2" } }, { @@ -2538,9 +2339,9 @@ } }, "params": { - "Region": "ap-northeast-2", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-northeast-2" } }, { @@ -2551,9 +2352,9 @@ } }, "params": { - "Region": "ap-northeast-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": 
"ap-northeast-1" } }, { @@ -2564,9 +2365,9 @@ } }, "params": { - "Region": "ap-northeast-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-northeast-1" } }, { @@ -2577,9 +2378,9 @@ } }, "params": { - "Region": "ap-northeast-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-northeast-1" } }, { @@ -2590,9 +2391,9 @@ } }, "params": { - "Region": "ap-northeast-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-northeast-1" } }, { @@ -2603,9 +2404,9 @@ } }, "params": { - "Region": "me-south-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "me-south-1" } }, { @@ -2616,9 +2417,9 @@ } }, "params": { - "Region": "me-south-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "me-south-1" } }, { @@ -2629,9 +2430,9 @@ } }, "params": { - "Region": "me-south-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "me-south-1" } }, { @@ -2642,9 +2443,9 @@ } }, "params": { - "Region": "me-south-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "me-south-1" } }, { @@ -2655,9 +2456,9 @@ } }, "params": { - "Region": "sa-east-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "sa-east-1" } }, { @@ -2668,9 +2469,9 @@ } }, "params": { - "Region": "sa-east-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "sa-east-1" } }, { @@ -2681,9 +2482,9 @@ } }, "params": { - "Region": "sa-east-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "sa-east-1" } }, { @@ -2694,477 +2495,165 @@ } }, "params": { - "Region": "sa-east-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "sa-east-1" } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://resource-explorer-2-fips.ap-east-1.api.aws" + "url": "https://resource-explorer-2-fips.ap-southeast-1.api.aws" } }, "params": { - "Region": "ap-east-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-southeast-1" } }, { - "documentation": "For region ap-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://resource-explorer-2-fips.ap-east-1.api.aws" + "url": "https://resource-explorer-2-fips.ap-southeast-1.api.aws" } }, "params": { - "Region": "ap-east-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-southeast-1" } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://resource-explorer-2.ap-east-1.api.aws" + "url": "https://resource-explorer-2.ap-southeast-1.api.aws" } }, "params": { - "Region": "ap-east-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-southeast-1" } }, { - "documentation": "For region ap-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://resource-explorer-2.ap-east-1.api.aws" + "url": "https://resource-explorer-2.ap-southeast-1.api.aws" } }, "params": { - "Region": "ap-east-1", "UseDualStack": false, - 
"UseFIPS": false + "UseFIPS": false, + "Region": "ap-southeast-1" } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://resource-explorer-2-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://resource-explorer-2-fips.ap-southeast-2.api.aws" } }, "params": { - "Region": "cn-north-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-southeast-2" } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://resource-explorer-2-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://resource-explorer-2-fips.ap-southeast-2.api.aws" } }, "params": { - "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-southeast-2" } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://resource-explorer-2.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://resource-explorer-2.ap-southeast-2.api.aws" } }, "params": { - "Region": "cn-north-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-southeast-2" } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://resource-explorer-2.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://resource-explorer-2.ap-southeast-2.api.aws" } }, "params": { - "Region": "cn-north-1", "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-southeast-2" } }, { - "documentation": "For region ca-west-1 with FIPS enabled and DualStack enabled", + "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://resource-explorer-2-fips.ca-west-1.api.aws" + "url": "https://resource-explorer-2-fips.ap-southeast-3.api.aws" } }, "params": { - "Region": "ca-west-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-southeast-3" } }, { - "documentation": "For region ca-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://resource-explorer-2-fips.ca-west-1.api.aws" + "url": "https://resource-explorer-2-fips.ap-southeast-3.api.aws" } }, "params": { - "Region": "ca-west-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "ap-southeast-3" } }, { - "documentation": "For region ca-west-1 with FIPS disabled and DualStack enabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://resource-explorer-2.ca-west-1.api.aws" + "url": "https://resource-explorer-2.ap-southeast-3.api.aws" } }, "params": { - "Region": "ca-west-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-southeast-3" } }, { - "documentation": "For region ca-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region ap-southeast-3 with FIPS disabled and 
DualStack disabled", "expect": { "endpoint": { - "url": "https://resource-explorer-2.ca-west-1.api.aws" + "url": "https://resource-explorer-2.ap-southeast-3.api.aws" } }, "params": { - "Region": "ca-west-1", "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "Region": "us-gov-west-1", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "Region": "us-gov-west-1", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.us-gov-west-1.api.aws" - } - }, - "params": { - "Region": "us-gov-west-1", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.us-gov-west-1.api.aws" - } - }, - "params": { - "Region": "us-gov-west-1", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.ap-southeast-1.api.aws" - } - }, - "params": { - "Region": "ap-southeast-1", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.ap-southeast-1.api.aws" - } - }, - "params": { - "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.ap-southeast-1.api.aws" - } - }, - "params": { - "Region": "ap-southeast-1", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.ap-southeast-1.api.aws" - } - }, - "params": { - "Region": "ap-southeast-1", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.ap-southeast-2.api.aws" - } - }, - "params": { - "Region": "ap-southeast-2", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.ap-southeast-2.api.aws" - } - }, - "params": { - "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.ap-southeast-2.api.aws" - } - }, - "params": { - "Region": "ap-southeast-2", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack 
disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.ap-southeast-2.api.aws" - } - }, - "params": { - "Region": "ap-southeast-2", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.ap-southeast-3.api.aws" - } - }, - "params": { - "Region": "ap-southeast-3", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-3 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.ap-southeast-3.api.aws" - } - }, - "params": { - "Region": "ap-southeast-3", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.ap-southeast-3.api.aws" - } - }, - "params": { - "Region": "ap-southeast-3", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-3 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.ap-southeast-3.api.aws" - } - }, - "params": { - "Region": "ap-southeast-3", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.ap-southeast-4.api.aws" - } - }, - "params": { - "Region": "ap-southeast-4", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-4 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.ap-southeast-4.api.aws" - } - }, - "params": { - "Region": "ap-southeast-4", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.ap-southeast-4.api.aws" - } - }, - "params": { - "Region": "ap-southeast-4", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-4 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.ap-southeast-4.api.aws" - } - }, - "params": { - "Region": "ap-southeast-4", - "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-5 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.ap-southeast-5.api.aws" - } - }, - "params": { - "Region": "ap-southeast-5", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-5 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.ap-southeast-5.api.aws" - } - }, - "params": { - "Region": "ap-southeast-5", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-5 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.ap-southeast-5.api.aws" - } - }, - "params": { - "Region": "ap-southeast-5", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-5 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - 
"url": "https://resource-explorer-2.ap-southeast-5.api.aws" - } - }, - "params": { - "Region": "ap-southeast-5", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "ap-southeast-3" } }, { @@ -3175,9 +2664,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-east-1" } }, { @@ -3188,9 +2677,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-east-1" } }, { @@ -3201,9 +2690,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-east-1" } }, { @@ -3214,61 +2703,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-6 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.ap-southeast-6.api.aws" - } - }, - "params": { - "Region": "ap-southeast-6", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-6 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.ap-southeast-6.api.aws" - } - }, - "params": { - "Region": "ap-southeast-6", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region ap-southeast-6 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.ap-southeast-6.api.aws" - } - }, - "params": { - "Region": "ap-southeast-6", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": "For region ap-southeast-6 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.ap-southeast-6.api.aws" - } - }, - "params": { - "Region": "ap-southeast-6", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-east-1" } }, { @@ -3279,9 +2716,9 @@ } }, "params": { - "Region": "us-east-2", "UseDualStack": true, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-east-2" } }, { @@ -3292,9 +2729,9 @@ } }, "params": { - "Region": "us-east-2", "UseDualStack": false, - "UseFIPS": true + "UseFIPS": true, + "Region": "us-east-2" } }, { @@ -3305,9 +2742,9 @@ } }, "params": { - "Region": "us-east-2", "UseDualStack": true, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-east-2" } }, { @@ -3318,61 +2755,9 @@ } }, "params": { - "Region": "us-east-2", "UseDualStack": false, - "UseFIPS": false - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-northwest-1", - "UseDualStack": true, - "UseFIPS": true - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2-fips.cn-northwest-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-northwest-1", - "UseDualStack": false, - "UseFIPS": true - } - }, - { - "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.cn-northwest-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-northwest-1", - "UseDualStack": true, - "UseFIPS": false - } - }, - { - "documentation": 
"For region cn-northwest-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://resource-explorer-2.cn-northwest-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-northwest-1", - "UseDualStack": false, - "UseFIPS": false + "UseFIPS": false, + "Region": "us-east-2" } }, { @@ -3383,9 +2768,9 @@ } }, "params": { - "Region": "us-east-1", "UseDualStack": false, "UseFIPS": false, + "Region": "us-east-1", "Endpoint": "https://example.com" } }, @@ -3395,9 +2780,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "Region": "us-east-1", "UseDualStack": false, "UseFIPS": true, + "Region": "us-east-1", "Endpoint": "https://example.com" } } @@ -3850,7 +3235,7 @@ } ], "traits": { - "smithy.api#documentation": "Changes the type of the index from one of the following types to the other. For more\n information about indexes and the role they perform in Amazon Web Services Resource Explorer, see Turning on\n cross-Region search by creating an aggregator index in the\n Amazon Web Services Resource Explorer User Guide.
\n\n \n AGGREGATOR
index type\n
The index contains information about resources from all Amazon Web Services Regions in the\n Amazon Web Services account in which you've created a Resource Explorer index. Resource information from\n all other Regions is replicated to this Region's index.
\nWhen you change the index type to AGGREGATOR
, Resource Explorer turns on\n replication of all discovered resource information from the other Amazon Web Services Regions\n in your account to this index. You can then, from this Region only, perform\n resource search queries that span all Amazon Web Services Regions in the Amazon Web Services account.\n Turning on replication from all other Regions is performed by asynchronous\n background tasks. You can check the status of the asynchronous tasks by using\n the GetIndex operation. When the asynchronous tasks complete,\n the Status
response of that operation changes from\n UPDATING
to ACTIVE
. After that, you can start to\n see results from other Amazon Web Services Regions in query results. However, it can take\n several hours for replication from all other Regions to complete.
You can have only one aggregator index per Amazon Web Services account. Before you can\n promote a different index to be the aggregator index for the account, you must\n first demote the existing aggregator index to type LOCAL
.
\n \n LOCAL
index type\n
The index contains information about resources in only the Amazon Web Services Region in\n which the index exists. If an aggregator index in another Region exists, then\n information in this local index is replicated to the aggregator index.
\nWhen you change the index type to LOCAL
, Resource Explorer turns off the\n replication of resource information from all other Amazon Web Services Regions in the\n Amazon Web Services account to this Region. The aggregator index remains in the\n UPDATING
state until all replication with other Regions\n successfully stops. You can check the status of the asynchronous task by using\n the GetIndex operation. When Resource Explorer successfully stops all\n replication with other Regions, the Status
response of that\n operation changes from UPDATING
to ACTIVE
. Separately,\n the resource information from other Regions that was previously stored in the\n index is deleted within 30 days by another background task. Until that\n asynchronous task completes, some results from other Regions can continue to\n appear in search results.
After you demote an aggregator index to a local index, you must wait\n 24 hours before you can promote another index to be the new\n aggregator index for the account.
\nChanges the type of the index from one of the following types to the other. For more\n information about indexes and the role they perform in Amazon Web Services Resource Explorer, see Turning on\n cross-Region search by creating an aggregator index in the\n Amazon Web Services Resource Explorer User Guide.
\n\n \n AGGREGATOR
index type\n
The index contains information about resources from all Amazon Web Services Regions in the\n Amazon Web Services account in which you've created a Resource Explorer index. Resource information from\n all other Regions is replicated to this Region's index.
\nWhen you change the index type to AGGREGATOR
, Resource Explorer turns on\n replication of all discovered resource information from the other Amazon Web Services Regions\n in your account to this index. You can then, from this Region only, perform\n resource search queries that span all Amazon Web Services Regions in the Amazon Web Services account.\n Turning on replication from all other Regions is performed by asynchronous\n background tasks. You can check the status of the asynchronous tasks by using\n the GetIndex operation. When the asynchronous tasks complete,\n the Status
response of that operation changes from\n UPDATING
to ACTIVE
. After that, you can start to\n see results from other Amazon Web Services Regions in query results. However, it can take\n several hours for replication from all other Regions to complete.
You can have only one aggregator index per Amazon Web Services account. Before you can\n promote a different index to be the aggregator index for the account, you must\n first demote the existing aggregator index to type LOCAL
.
\n \n LOCAL
index type\n
The index contains information about resources in only the Amazon Web Services Region in\n which the index exists. If an aggregator index in another Region exists, then\n information in this local index is replicated to the aggregator index.
\nWhen you change the index type to LOCAL
, Resource Explorer turns off the\n replication of resource information from all other Amazon Web Services Regions in the\n Amazon Web Services account to this Region. The aggregator index remains in the\n UPDATING
state until all replication with other Regions\n successfully stops. You can check the status of the asynchronous task by using\n the GetIndex operation. When Resource Explorer successfully stops all\n replication with other Regions, the Status
response of that\n operation changes from UPDATING
to ACTIVE
. Separately,\n the resource information from other Regions that was previously stored in the\n index is deleted within 30 days by another background task. Until that\n asynchronous task completes, some results from other Regions can continue to\n appear in search results.
After you demote an aggregator index to a local index, you must wait\n 24 hours before you can promote another index to be the new\n aggregator index for the account.
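Editor's note: the model documentation above describes promoting a Region's index to the AGGREGATOR type and then polling GetIndex until the asynchronous replication tasks finish. For orientation only, a minimal aws-sdk-kotlin sketch might look like the following; the client and package names, the builder-lambda call style, the IndexType/IndexState member spellings, and the example ARN are assumptions based on the SDK's usual conventions, not anything stated in this model diff.

```kotlin
import aws.sdk.kotlin.services.resourceexplorer2.ResourceExplorer2Client
import aws.sdk.kotlin.services.resourceexplorer2.model.IndexState
import aws.sdk.kotlin.services.resourceexplorer2.model.IndexType
import kotlinx.coroutines.delay
import kotlinx.coroutines.runBlocking

// Promote the index in this Region to AGGREGATOR, then poll GetIndex until the
// asynchronous replication tasks leave the UPDATING state. The ARN is a placeholder.
fun main(): Unit = runBlocking {
    ResourceExplorer2Client { region = "us-east-1" }.use { explorer ->
        explorer.updateIndexType {
            arn = "arn:aws:resource-explorer-2:us-east-1:111122223333:index/EXAMPLE"
            type = IndexType.Aggregator
        }
        var state = explorer.getIndex { }.state
        while (state == IndexState.Updating) {
            delay(30_000) // replication can take hours; a real caller would back off or give up
            state = explorer.getIndex { }.state
        }
        println("Index state is now $state")
    }
}
```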
\nAccepts the transfer of a domain from another Amazon Web Services account to the\n\t\t\t\tcurrentAmazon Web Services account. You initiate a transfer between Amazon Web Services accounts using TransferDomainToAnotherAwsAccount.
\nIf you use the CLI command at accept-domain-transfer-from-another-aws-account, use JSON format as input\n\t\t\tinstead of text because otherwise CLI will throw an error from domain\n\t\t\ttransfer input that includes single quotes.
\nUse either ListOperations or GetOperationDetail to determine whether the operation succeeded. GetOperationDetail provides additional information, for example,\n\t\t\t\tDomain Transfer from Aws Account 111122223333 has been cancelled
.
Accepts the transfer of a domain from another Amazon Web Services account to the\n\t\t\t\tcurrent Amazon Web Services account. You initiate a transfer between Amazon Web Services accounts using TransferDomainToAnotherAwsAccount.
\n\t\tIf you use the CLI command at accept-domain-transfer-from-another-aws-account, use JSON format as input\n\t\t\tinstead of text because otherwise CLI will throw an error from domain\n\t\t\ttransfer input that includes single quotes.
\n\n\t\tUse either \n\t\t\tListOperations or \n\t\t\tGetOperationDetail \n\t\t\tto determine whether the operation succeeded. \n\t\t\tGetOperationDetail \n\t\t\tprovides additional information, for example, Domain Transfer from Aws Account 111122223333 has been cancelled
. \n\t\t
The name of the domain that was specified when another Amazon Web Services account\n\t\t\tsubmitted a TransferDomainToAnotherAwsAccount request.
", + "smithy.api#documentation": "The name of the domain that was specified when another Amazon Web Services account submitted a \n\t\t\tTransferDomainToAnotherAwsAccount\n\t\t\trequest.\n\t\t
", "smithy.api#required": {} } }, "Password": { "target": "com.amazonaws.route53domains#String", "traits": { - "smithy.api#documentation": "The password that was returned by the TransferDomainToAnotherAwsAccount request.
", + "smithy.api#documentation": "The password that was returned by the \n\t\t\tTransferDomainToAnotherAwsAccount\n\t\t\trequest.\n\t\t
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "The AcceptDomainTransferFromAnotherAwsAccount request includes the following\n\t\t\telements.
" + "smithy.api#documentation": "The AcceptDomainTransferFromAnotherAwsAccount request includes the following elements.
" } }, "com.amazonaws.route53domains#AcceptDomainTransferFromAnotherAwsAccountResponse": { @@ -83,12 +83,12 @@ "OperationId": { "target": "com.amazonaws.route53domains#OperationId", "traits": { - "smithy.api#documentation": "Identifier for tracking the progress of the request. To query the operation status,\n\t\t\tuse GetOperationDetail.
" + "smithy.api#documentation": "Identifier for tracking the progress of the request. To query the operation status, use \n\t\t\tGetOperationDetail.
" } } }, "traits": { - "smithy.api#documentation": "The AcceptDomainTransferFromAnotherAwsAccount response includes the following\n\t\t\telement.
" + "smithy.api#documentation": "The AcceptDomainTransferFromAnotherAwsAccount response includes the following element.
" } }, "com.amazonaws.route53domains#AccountId": { @@ -110,75 +110,13 @@ } } }, - "com.amazonaws.route53domains#AssociateDelegationSignerToDomain": { - "type": "operation", - "input": { - "target": "com.amazonaws.route53domains#AssociateDelegationSignerToDomainRequest" - }, - "output": { - "target": "com.amazonaws.route53domains#AssociateDelegationSignerToDomainResponse" - }, - "errors": [ - { - "target": "com.amazonaws.route53domains#DnssecLimitExceeded" - }, - { - "target": "com.amazonaws.route53domains#DuplicateRequest" - }, - { - "target": "com.amazonaws.route53domains#InvalidInput" - }, - { - "target": "com.amazonaws.route53domains#OperationLimitExceeded" - }, - { - "target": "com.amazonaws.route53domains#TLDRulesViolation" - }, - { - "target": "com.amazonaws.route53domains#UnsupportedTLD" - } - ], - "traits": { - "smithy.api#documentation": "Creates a delegation signer (DS) record in the registry zone for this domain\n\t\t\tname.
\nNote that creating DS record at the registry impacts DNSSEC validation of your DNS\n\t\t\trecords. This action may render your domain name unavailable on the internet if the\n\t\t\tsteps are completed in the wrong order, or with incorrect timing. For more information\n\t\t\tabout DNSSEC signing, see Configuring DNSSEC\n\t\t\t\tsigning in the Route 53 developer\n\t\t\tguide.
" - } - }, - "com.amazonaws.route53domains#AssociateDelegationSignerToDomainRequest": { - "type": "structure", - "members": { - "DomainName": { - "target": "com.amazonaws.route53domains#DomainName", - "traits": { - "smithy.api#documentation": "The name of the domain.
", - "smithy.api#required": {} - } - }, - "SigningAttributes": { - "target": "com.amazonaws.route53domains#DnssecSigningAttributes", - "traits": { - "smithy.api#documentation": "The information about a key, including the algorithm, public key-value, and\n\t\t\tflags.
", - "smithy.api#required": {} - } - } - } - }, - "com.amazonaws.route53domains#AssociateDelegationSignerToDomainResponse": { - "type": "structure", - "members": { - "OperationId": { - "target": "com.amazonaws.route53domains#OperationId", - "traits": { - "smithy.api#documentation": "The identifier for tracking the progress of the request. To query the operation\n\t\t\tstatus, use GetOperationDetail.
" - } - } - } - }, "com.amazonaws.route53domains#BillingRecord": { "type": "structure", "members": { "DomainName": { "target": "com.amazonaws.route53domains#DomainName", "traits": { - "smithy.api#documentation": "The name of the domain that the billing record applies to. If the domain name contains\n\t\t\tcharacters other than a-z, 0-9, and - (hyphen), such as an internationalized domain\n\t\t\tname, then this value is in Punycode. For more information, see DNS\n\t\t\t\tDomain Name Format in the Amazon Route 53 Developer\n\t\t\t\tGuide.
" + "smithy.api#documentation": "The name of the domain that the billing record applies to. If the domain name contains characters \n\t\t\tother than a-z, 0-9, and - (hyphen), such as an internationalized domain name, then this value is in Punycode. \n\t\t\tFor more information, see \n\t\t\tDNS Domain Name Format \n\t\t\tin the Amazon Route 53 Developer Guide.
" } }, "Operation": { @@ -203,7 +141,7 @@ "target": "com.amazonaws.route53domains#Price", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "The price that you were charged for the operation, in US dollars.
\nExample value: 12.0
" + "smithy.api#documentation": "The price that you were charged for the operation, in US dollars.
\n\t\tExample value: 12.0
" } } }, @@ -240,7 +178,7 @@ } ], "traits": { - "smithy.api#documentation": "Cancels the transfer of a domain from the current Amazon Web Services account to\n\t\t\tanother Amazon Web Services account. You initiate a transfer betweenAmazon Web Services accounts using TransferDomainToAnotherAwsAccount.
\nYou must cancel the transfer before the other Amazon Web Services account accepts\n\t\t\t\tthe transfer using AcceptDomainTransferFromAnotherAwsAccount.
\nUse either ListOperations or GetOperationDetail to determine whether the operation succeeded. GetOperationDetail provides additional information, for example,\n\t\t\t\tDomain Transfer from Aws Account 111122223333 has been cancelled
.
Cancels the transfer of a domain from the current Amazon Web Services account to another Amazon Web Services account. You initiate a transfer betweenAmazon Web Services accounts using \n\t\t\tTransferDomainToAnotherAwsAccount.\n\t\t
\n\t\t\n\t\tYou must cancel the transfer before the other Amazon Web Services account accepts the transfer using \n\t\t\t\tAcceptDomainTransferFromAnotherAwsAccount.
\n\t\tUse either \n\t\t\tListOperations or \n\t\t\tGetOperationDetail \n\t\t\tto determine whether the operation succeeded. \n\t\t\tGetOperationDetail \n\t\t\tprovides additional information, for example, Domain Transfer from Aws Account 111122223333 has been cancelled
. \n\t\t
The CancelDomainTransferToAnotherAwsAccount request includes the following\n\t\t\telement.
" + "smithy.api#documentation": "The CancelDomainTransferToAnotherAwsAccount request includes the following element.
" } }, "com.amazonaws.route53domains#CancelDomainTransferToAnotherAwsAccountResponse": { @@ -264,12 +202,12 @@ "OperationId": { "target": "com.amazonaws.route53domains#OperationId", "traits": { - "smithy.api#documentation": "The identifier that TransferDomainToAnotherAwsAccount
returned to track\n\t\t\tthe progress of the request. Because the transfer request was canceled, the value is no\n\t\t\tlonger valid, and you can't use GetOperationDetail
to query the operation\n\t\t\tstatus.
The identifier that TransferDomainToAnotherAwsAccount
returned to track the progress of the request. \n\t\t\tBecause the transfer request was canceled, the value is no longer valid, and you can't use GetOperationDetail
\n\t\t\tto query the operation status.
The CancelDomainTransferToAnotherAwsAccount
response includes the\n\t\t\tfollowing element.
The CancelDomainTransferToAnotherAwsAccount
response includes the following element.
This operation checks the availability of one domain name. Note that if the\n\t\t\tavailability status of a domain is pending, you must submit another request to determine\n\t\t\tthe availability of the domain name.
" + "smithy.api#documentation": "This operation checks the availability of one domain name. Note that if the availability status of a domain is pending, you must \n\t\t\tsubmit another request to determine the availability of the domain name.
" } }, "com.amazonaws.route53domains#CheckDomainAvailabilityRequest": { @@ -298,7 +236,7 @@ "DomainName": { "target": "com.amazonaws.route53domains#DomainName", "traits": { - "smithy.api#documentation": "The name of the domain that you want to get availability for. The top-level domain\n\t\t\t(TLD), such as .com, must be a TLD that Route 53 supports. For a list of supported TLDs,\n\t\t\tsee Domains that You Can\n\t\t\t\tRegister with Amazon Route 53 in the Amazon Route 53 Developer\n\t\t\t\tGuide.
\nThe domain name can contain only the following characters:
\nLetters a through z. Domain names are not case sensitive.
\nNumbers 0 through 9.
\nHyphen (-). You can't specify a hyphen at the beginning or end of a label.\n\t\t\t\t
\nPeriod (.) to separate the labels in the name, such as the .
in\n\t\t\t\t\t\texample.com
.
Internationalized domain names are not supported for some top-level domains. To\n\t\t\tdetermine whether the TLD that you want to use supports internationalized domain names,\n\t\t\tsee Domains that You Can\n\t\t\t\tRegister with Amazon Route 53. For more information, see Formatting Internationalized Domain Names.
", + "smithy.api#documentation": "The name of the domain that you want to get availability for. The top-level domain (TLD), such as .com, must be a TLD that Route 53 supports. \n\t\t\tFor a list of supported TLDs, see \n\t\t\tDomains that You Can Register with Amazon Route 53 in the \n\t\t\tAmazon Route 53 Developer Guide.
\n\t\tThe domain name can contain only the following characters:
\n\t\tLetters a through z. Domain names are not case sensitive.
\nNumbers 0 through 9.
\nHyphen (-). You can't specify a hyphen at the beginning or end of a label.
\nPeriod (.) to separate the labels in the name, such as the .
in example.com
.
Internationalized domain names are not supported for some top-level domains. To determine whether the TLD that you want to use supports \n\t\t\tinternationalized domain names, see \n\t\t\tDomains that You Can Register with Amazon Route 53. \n\t\t\tFor more information, see \n\t\t\tFormatting Internationalized Domain Names.\n\t\t
", "smithy.api#required": {} } }, @@ -319,7 +257,8 @@ "Availability": { "target": "com.amazonaws.route53domains#DomainAvailability", "traits": { - "smithy.api#documentation": "Whether the domain name is available for registering.
\nYou can register only domains designated as AVAILABLE
.
Valid values:
\nThe domain name is available.
\nThe domain name is reserved under specific conditions.
\nThe domain name is available and can be preordered.
\nThe TLD registry didn't reply with a definitive answer about whether the\n\t\t\t\t\t\tdomain name is available. Route 53 can return this response for a variety of\n\t\t\t\t\t\treasons, for example, the registry is performing maintenance. Try again\n\t\t\t\t\t\tlater.
\nThe TLD registry didn't return a response in the expected amount of time.\n\t\t\t\t\t\tWhen the response is delayed, it usually takes just a few extra seconds. You\n\t\t\t\t\t\tcan resubmit the request immediately.
\nThe domain name has been reserved for another person or\n\t\t\t\t\t\torganization.
\nThe domain name is not available.
\nThe domain name is not available.
\nThe domain name is forbidden.
\nWhether the domain name is available for registering.
\n\t\tYou can register only domains designated as AVAILABLE
.
Valid values:
\n\t\tThe domain name is available.
\nThe domain name is reserved under specific conditions.
\nThe domain name is available and can be preordered.
\nThe TLD registry didn't reply with a definitive answer about whether the domain name is available. \n\t\t\t\t\tRoute 53 can return this response for a variety of reasons, for example, the registry is performing maintenance. \n\t\t\t\t\tTry again later.
\nThe TLD registry didn't return a response in the expected amount of time. When the response is delayed, \n\t\t\t\t\tit usually takes just a few extra seconds. You can resubmit the request immediately.
\nThe domain name has been reserved for another person or organization.
\nThe domain name is not available.
\nThe domain name is not available.
\nThe domain name is forbidden.
\nThe name of the domain that you want to transfer to Route 53. The top-level domain\n\t\t\t(TLD), such as .com, must be a TLD that Route 53 supports. For a list of supported TLDs,\n\t\t\tsee Domains that You Can\n\t\t\t\tRegister with Amazon Route 53 in the Amazon Route 53 Developer\n\t\t\t\tGuide.
\nThe domain name can contain only the following characters:
\nLetters a through z. Domain names are not case sensitive.
\nNumbers 0 through 9.
\nHyphen (-). You can't specify a hyphen at the beginning or end of a label.\n\t\t\t\t
\nPeriod (.) to separate the labels in the name, such as the .
in\n\t\t\t\t\t\texample.com
.
The name of the domain that you want to transfer to Route 53. The top-level domain (TLD), such as .com, must be a TLD that Route 53 supports. \n\t\t\tFor a list of supported TLDs, see \n\t\t\tDomains that You Can Register with Amazon Route 53 in the \n\t\t\tAmazon Route 53 Developer Guide.
\n\t\tThe domain name can contain only the following characters:
\n\t\tLetters a through z. Domain names are not case sensitive.
\nNumbers 0 through 9.
\nHyphen (-). You can't specify a hyphen at the beginning or end of a label.
\nPeriod (.) to separate the labels in the name, such as the .
in example.com
.
If the registrar for the top-level domain (TLD) requires an authorization code to\n\t\t\ttransfer the domain, the code that you got from the current registrar for the\n\t\t\tdomain.
" + "smithy.api#documentation": "If the registrar for the top-level domain (TLD) requires an authorization code to transfer the domain, \n\t\t\tthe code that you got from the current registrar for the domain.
" } } }, @@ -374,7 +313,8 @@ "Transferability": { "target": "com.amazonaws.route53domains#DomainTransferability", "traits": { - "smithy.api#documentation": "A complex type that contains information about whether the specified domain can be\n\t\t\ttransferred to Route 53.
" + "smithy.api#documentation": "A complex type that contains information about whether the specified domain can be transferred to Route 53.
", + "smithy.api#required": {} } } }, @@ -391,29 +331,6 @@ } } }, - "com.amazonaws.route53domains#Consent": { - "type": "structure", - "members": { - "MaxPrice": { - "target": "com.amazonaws.route53domains#Price", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "Maximum amount the customer agreed to accept.
", - "smithy.api#required": {} - } - }, - "Currency": { - "target": "com.amazonaws.route53domains#Currency", - "traits": { - "smithy.api#documentation": " Currency for the MaxPrice
.
Customer's consent for the owner change request.
" - } - }, "com.amazonaws.route53domains#ContactDetail": { "type": "structure", "members": { @@ -432,7 +349,7 @@ "ContactType": { "target": "com.amazonaws.route53domains#ContactType", "traits": { - "smithy.api#documentation": "Indicates whether the contact is a person, company, association, or public\n\t\t\torganization. Note the following:
\nIf you specify a value other than PERSON
, you must also specify a\n\t\t\t\t\tvalue for OrganizationName
.
For some TLDs, the privacy protection available depends on the value that you\n\t\t\t\t\tspecify for Contact Type
. For the privacy protection settings for\n\t\t\t\t\tyour TLD, see Domains that You\n\t\t\t\t\t\tCan Register with Amazon Route 53 in the Amazon Route 53\n\t\t\t\t\t\tDeveloper Guide\n
For .es domains, the value of ContactType
must be\n\t\t\t\t\t\tPERSON
for all three contacts.
Indicates whether the contact is a person, company, association, or public organization. Note the following:
\n\t\tIf you specify a value other than PERSON
, you must also specify a value for \n\t\t\t\tOrganizationName
.
For some TLDs, the privacy protection available depends on the value that you specify for \n\t\t\t\tContact Type
. For the privacy protection settings for your TLD, see \n\t\t\t\tDomains that You Can \n\t\t\t\t\tRegister with Amazon Route 53 in the Amazon Route 53 Developer Guide\n
For .es domains, the value of ContactType
must be PERSON
for all three contacts.
The phone number of the contact.
\nConstraints: Phone number must be specified in the format \"+[country dialing\n\t\t\tcode].[number including any area code>]\". For example, a US phone number might appear\n\t\t\tas \"+1.1234567890\"
.
The phone number of the contact.
\n\t\tConstraints: Phone number must be specified in the format \"+[country\tdialing code].[number including any area code>]\". \n\t\t\tFor example, a US phone number might appear as \"+1.1234567890\"
.
Fax number of the contact.
\nConstraints: Phone number must be specified in the format \"+[country dialing\n\t\t\tcode].[number including any area code]\". For example, a US phone number might appear as\n\t\t\t\t\"+1.1234567890\"
.
Fax number of the contact.
\n\t\tConstraints: Phone number must be specified in the format \"+[country dialing code].[number including any area code]\". \n\t\t\tFor example, a US phone number might appear as \"+1.1234567890\"
.
A list of name-value pairs for parameters required by certain top-level\n\t\t\tdomains.
" + "smithy.api#documentation": "A list of name-value pairs for parameters required by certain top-level domains.
" } } }, @@ -526,1549 +443,1041 @@ } }, "com.amazonaws.route53domains#ContactType": { - "type": "enum", - "members": { - "PERSON": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PERSON" - } - }, - "COMPANY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "COMPANY" - } - }, - "ASSOCIATION": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ASSOCIATION" - } - }, - "PUBLIC_BODY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PUBLIC_BODY" - } - }, - "RESELLER": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "RESELLER" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PERSON", + "name": "PERSON" + }, + { + "value": "COMPANY", + "name": "COMPANY" + }, + { + "value": "ASSOCIATION", + "name": "ASSOCIATION" + }, + { + "value": "PUBLIC_BODY", + "name": "PUBLIC_BODY" + }, + { + "value": "RESELLER", + "name": "RESELLER" } - } + ] } }, "com.amazonaws.route53domains#CountryCode": { - "type": "enum", - "members": { - "AC": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AC" - } - }, - "AD": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AD" - } - }, - "AE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AE" - } - }, - "AF": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AF" - } - }, - "AG": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AG" - } - }, - "AI": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AI" - } - }, - "AL": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AL" - } - }, - "AM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AM" - } - }, - "AN": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AN" - } - }, - "AO": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AO" - } - }, - "AQ": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AQ" - } - }, - "AR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AR" - } - }, - "AS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AS" - } - }, - "AT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AT" - } - }, - "AU": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AU" - } - }, - "AW": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AW" - } - }, - "AX": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AX" - } - }, - "AZ": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AZ" - } - }, - "BA": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BA" - } - }, - "BB": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BB" - } - }, - "BD": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BD" - } - }, - "BE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BE" - } - }, - "BF": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BF" - } - }, - "BG": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BG" - } - }, - "BH": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BH" - } - }, - "BI": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BI" - } - }, - "BJ": { - 
"target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BJ" - } - }, - "BL": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BL" - } - }, - "BM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BM" - } - }, - "BN": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BN" - } - }, - "BO": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BO" - } - }, - "BQ": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BQ" - } - }, - "BR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BR" - } - }, - "BS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BS" - } - }, - "BT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BT" - } - }, - "BV": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BV" - } - }, - "BW": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BW" - } - }, - "BY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BY" - } - }, - "BZ": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "BZ" - } - }, - "CA": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CA" - } - }, - "CC": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CC" - } - }, - "CD": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CD" - } - }, - "CF": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CF" - } - }, - "CG": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CG" - } - }, - "CH": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CH" - } - }, - "CI": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CI" - } - }, - "CK": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CK" - } - }, - "CL": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CL" - } - }, - "CM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CM" - } - }, - "CN": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CN" - } - }, - "CO": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CO" - } - }, - "CR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CR" - } - }, - "CU": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CU" - } - }, - "CV": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CV" - } - }, - "CW": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CW" - } - }, - "CX": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CX" - } - }, - "CY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CY" - } - }, - "CZ": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CZ" - } - }, - "DE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DE" - } - }, - "DJ": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DJ" - } - }, - "DK": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DK" - } - }, - "DM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DM" - } - }, - "DO": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DO" - } - }, - "DZ": { - "target": "smithy.api#Unit", - 
"traits": { - "smithy.api#enumValue": "DZ" - } - }, - "EC": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "EC" - } - }, - "EE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "EE" - } - }, - "EG": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "EG" - } - }, - "EH": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "EH" - } - }, - "ER": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ER" - } - }, - "ES": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ES" - } - }, - "ET": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ET" - } - }, - "FI": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "FI" - } - }, - "FJ": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "FJ" - } - }, - "FK": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "FK" - } - }, - "FM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "FM" - } - }, - "FO": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "FO" - } - }, - "FR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "FR" - } - }, - "GA": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "GA" - } - }, - "GB": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "GB" - } - }, - "GD": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "GD" - } - }, - "GE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "GE" - } - }, - "GF": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "GF" - } - }, - "GG": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "GG" - } - }, - "GH": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "GH" - } - }, - "GI": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "GI" - } - }, - "GL": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "GL" - } - }, - "GM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "GM" - } - }, - "GN": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "GN" - } - }, - "GP": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "GP" - } - }, - "GQ": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "GQ" - } - }, - "GR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "GR" - } - }, - "GS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "GS" - } - }, - "GT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "GT" - } - }, - "GU": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "GU" - } - }, - "GW": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "GW" - } - }, - "GY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "GY" - } - }, - "HK": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "HK" - } - }, - "HM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "HM" - } - }, - "HN": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "HN" - } - }, - "HR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "HR" - } - }, - "HT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": 
"HT" - } - }, - "HU": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "HU" - } - }, - "ID": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ID" - } - }, - "IE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "IE" - } - }, - "IL": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "IL" - } - }, - "IM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "IM" - } - }, - "IN": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "IN" - } - }, - "IO": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "IO" - } - }, - "IQ": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "IQ" - } - }, - "IR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "IR" - } - }, - "IS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "IS" - } - }, - "IT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "IT" - } - }, - "JE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "JE" - } - }, - "JM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "JM" - } - }, - "JO": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "JO" - } - }, - "JP": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "JP" - } - }, - "KE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "KE" - } - }, - "KG": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "KG" - } - }, - "KH": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "KH" - } - }, - "KI": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "KI" - } - }, - "KM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "KM" - } - }, - "KN": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "KN" - } - }, - "KP": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "KP" - } - }, - "KR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "KR" - } - }, - "KW": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "KW" - } - }, - "KY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "KY" - } - }, - "KZ": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "KZ" - } - }, - "LA": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "LA" - } - }, - "LB": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "LB" - } - }, - "LC": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "LC" - } - }, - "LI": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "LI" - } - }, - "LK": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "LK" - } - }, - "LR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "LR" - } - }, - "LS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "LS" - } - }, - "LT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "LT" - } - }, - "LU": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "LU" - } - }, - "LV": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "LV" - } - }, - "LY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "LY" - } - }, - "MA": { - "target": 
"smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MA" - } - }, - "MC": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MC" - } - }, - "MD": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MD" - } - }, - "ME": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ME" - } - }, - "MF": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MF" - } - }, - "MG": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MG" - } - }, - "MH": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MH" - } - }, - "MK": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MK" - } - }, - "ML": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ML" - } - }, - "MM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MM" - } - }, - "MN": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MN" - } - }, - "MO": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MO" - } - }, - "MP": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MP" - } - }, - "MQ": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MQ" - } - }, - "MR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MR" - } - }, - "MS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MS" - } - }, - "MT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MT" - } - }, - "MU": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MU" - } - }, - "MV": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MV" - } - }, - "MW": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MW" - } - }, - "MX": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MX" - } - }, - "MY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MY" - } - }, - "MZ": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "MZ" - } - }, - "NA": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NA" - } - }, - "NC": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NC" - } - }, - "NE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NE" - } - }, - "NF": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NF" - } - }, - "NG": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NG" - } - }, - "NI": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NI" - } - }, - "NL": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NL" - } - }, - "NO": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NO" - } - }, - "NP": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NP" - } - }, - "NR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NR" - } - }, - "NU": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NU" - } - }, - "NZ": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "NZ" - } - }, - "OM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "OM" - } - }, - "PA": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PA" - } - }, - "PE": { - "target": "smithy.api#Unit", - "traits": { - 
"smithy.api#enumValue": "PE" - } - }, - "PF": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PF" - } - }, - "PG": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PG" - } - }, - "PH": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PH" - } - }, - "PK": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PK" - } - }, - "PL": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PL" - } - }, - "PM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PM" - } - }, - "PN": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PN" - } - }, - "PR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PR" - } - }, - "PS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PS" - } - }, - "PT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PT" - } - }, - "PW": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PW" - } - }, - "PY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PY" - } - }, - "QA": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "QA" - } - }, - "RE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "RE" - } - }, - "RO": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "RO" - } - }, - "RS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "RS" - } - }, - "RU": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "RU" - } - }, - "RW": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "RW" - } - }, - "SA": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SA" - } - }, - "SB": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SB" - } - }, - "SC": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SC" - } - }, - "SD": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SD" - } - }, - "SE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SE" - } - }, - "SG": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SG" - } - }, - "SH": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SH" - } - }, - "SI": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SI" - } - }, - "SJ": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SJ" - } - }, - "SK": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SK" - } - }, - "SL": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SL" - } - }, - "SM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SM" - } - }, - "SN": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SN" - } - }, - "SO": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SO" - } - }, - "SR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SR" - } - }, - "SS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SS" - } - }, - "ST": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ST" - } - }, - "SV": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SV" - } - }, - "SX": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SX" - } - }, - 
"SY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SY" - } - }, - "SZ": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SZ" - } - }, - "TC": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TC" - } - }, - "TD": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TD" - } - }, - "TF": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TF" - } - }, - "TG": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TG" - } - }, - "TH": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TH" - } - }, - "TJ": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TJ" - } - }, - "TK": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TK" - } - }, - "TL": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TL" - } - }, - "TM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TM" - } - }, - "TN": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TN" - } - }, - "TO": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TO" - } - }, - "TP": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TP" - } - }, - "TR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TR" - } - }, - "TT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TT" - } - }, - "TV": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TV" - } - }, - "TW": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TW" - } - }, - "TZ": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TZ" - } - }, - "UA": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UA" - } - }, - "UG": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UG" - } - }, - "US": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "US" - } - }, - "UY": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UY" - } - }, - "UZ": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UZ" - } - }, - "VA": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "VA" - } - }, - "VC": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "VC" - } - }, - "VE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "VE" - } - }, - "VG": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "VG" - } - }, - "VI": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "VI" - } - }, - "VN": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "VN" - } - }, - "VU": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "VU" - } - }, - "WF": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "WF" - } - }, - "WS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "WS" - } - }, - "YE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "YE" - } - }, - "YT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "YT" - } - }, - "ZA": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ZA" - } - }, - "ZM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ZM" - } - }, - "ZW": { - "target": "smithy.api#Unit", 
- "traits": { - "smithy.api#enumValue": "ZW" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "AC", + "name": "AC" + }, + { + "value": "AD", + "name": "AD" + }, + { + "value": "AE", + "name": "AE" + }, + { + "value": "AF", + "name": "AF" + }, + { + "value": "AG", + "name": "AG" + }, + { + "value": "AI", + "name": "AI" + }, + { + "value": "AL", + "name": "AL" + }, + { + "value": "AM", + "name": "AM" + }, + { + "value": "AN", + "name": "AN" + }, + { + "value": "AO", + "name": "AO" + }, + { + "value": "AQ", + "name": "AQ" + }, + { + "value": "AR", + "name": "AR" + }, + { + "value": "AS", + "name": "AS" + }, + { + "value": "AT", + "name": "AT" + }, + { + "value": "AU", + "name": "AU" + }, + { + "value": "AW", + "name": "AW" + }, + { + "value": "AX", + "name": "AX" + }, + { + "value": "AZ", + "name": "AZ" + }, + { + "value": "BA", + "name": "BA" + }, + { + "value": "BB", + "name": "BB" + }, + { + "value": "BD", + "name": "BD" + }, + { + "value": "BE", + "name": "BE" + }, + { + "value": "BF", + "name": "BF" + }, + { + "value": "BG", + "name": "BG" + }, + { + "value": "BH", + "name": "BH" + }, + { + "value": "BI", + "name": "BI" + }, + { + "value": "BJ", + "name": "BJ" + }, + { + "value": "BL", + "name": "BL" + }, + { + "value": "BM", + "name": "BM" + }, + { + "value": "BN", + "name": "BN" + }, + { + "value": "BO", + "name": "BO" + }, + { + "value": "BQ", + "name": "BQ" + }, + { + "value": "BR", + "name": "BR" + }, + { + "value": "BS", + "name": "BS" + }, + { + "value": "BT", + "name": "BT" + }, + { + "value": "BV", + "name": "BV" + }, + { + "value": "BW", + "name": "BW" + }, + { + "value": "BY", + "name": "BY" + }, + { + "value": "BZ", + "name": "BZ" + }, + { + "value": "CA", + "name": "CA" + }, + { + "value": "CC", + "name": "CC" + }, + { + "value": "CD", + "name": "CD" + }, + { + "value": "CF", + "name": "CF" + }, + { + "value": "CG", + "name": "CG" + }, + { + "value": "CH", + "name": "CH" + }, + { + "value": "CI", + "name": "CI" + }, + { + "value": "CK", + "name": "CK" + }, + { + "value": "CL", + "name": "CL" + }, + { + "value": "CM", + "name": "CM" + }, + { + "value": "CN", + "name": "CN" + }, + { + "value": "CO", + "name": "CO" + }, + { + "value": "CR", + "name": "CR" + }, + { + "value": "CU", + "name": "CU" + }, + { + "value": "CV", + "name": "CV" + }, + { + "value": "CW", + "name": "CW" + }, + { + "value": "CX", + "name": "CX" + }, + { + "value": "CY", + "name": "CY" + }, + { + "value": "CZ", + "name": "CZ" + }, + { + "value": "DE", + "name": "DE" + }, + { + "value": "DJ", + "name": "DJ" + }, + { + "value": "DK", + "name": "DK" + }, + { + "value": "DM", + "name": "DM" + }, + { + "value": "DO", + "name": "DO" + }, + { + "value": "DZ", + "name": "DZ" + }, + { + "value": "EC", + "name": "EC" + }, + { + "value": "EE", + "name": "EE" + }, + { + "value": "EG", + "name": "EG" + }, + { + "value": "EH", + "name": "EH" + }, + { + "value": "ER", + "name": "ER" + }, + { + "value": "ES", + "name": "ES" + }, + { + "value": "ET", + "name": "ET" + }, + { + "value": "FI", + "name": "FI" + }, + { + "value": "FJ", + "name": "FJ" + }, + { + "value": "FK", + "name": "FK" + }, + { + "value": "FM", + "name": "FM" + }, + { + "value": "FO", + "name": "FO" + }, + { + "value": "FR", + "name": "FR" + }, + { + "value": "GA", + "name": "GA" + }, + { + "value": "GB", + "name": "GB" + }, + { + "value": "GD", + "name": "GD" + }, + { + "value": "GE", + "name": "GE" + }, + { + "value": "GF", + "name": "GF" + }, + { + "value": "GG", + "name": "GG" + }, + { + "value": "GH", + "name": "GH" + }, + { + 
"value": "GI", + "name": "GI" + }, + { + "value": "GL", + "name": "GL" + }, + { + "value": "GM", + "name": "GM" + }, + { + "value": "GN", + "name": "GN" + }, + { + "value": "GP", + "name": "GP" + }, + { + "value": "GQ", + "name": "GQ" + }, + { + "value": "GR", + "name": "GR" + }, + { + "value": "GS", + "name": "GS" + }, + { + "value": "GT", + "name": "GT" + }, + { + "value": "GU", + "name": "GU" + }, + { + "value": "GW", + "name": "GW" + }, + { + "value": "GY", + "name": "GY" + }, + { + "value": "HK", + "name": "HK" + }, + { + "value": "HM", + "name": "HM" + }, + { + "value": "HN", + "name": "HN" + }, + { + "value": "HR", + "name": "HR" + }, + { + "value": "HT", + "name": "HT" + }, + { + "value": "HU", + "name": "HU" + }, + { + "value": "ID", + "name": "ID" + }, + { + "value": "IE", + "name": "IE" + }, + { + "value": "IL", + "name": "IL" + }, + { + "value": "IM", + "name": "IM" + }, + { + "value": "IN", + "name": "IN" + }, + { + "value": "IO", + "name": "IO" + }, + { + "value": "IQ", + "name": "IQ" + }, + { + "value": "IR", + "name": "IR" + }, + { + "value": "IS", + "name": "IS" + }, + { + "value": "IT", + "name": "IT" + }, + { + "value": "JE", + "name": "JE" + }, + { + "value": "JM", + "name": "JM" + }, + { + "value": "JO", + "name": "JO" + }, + { + "value": "JP", + "name": "JP" + }, + { + "value": "KE", + "name": "KE" + }, + { + "value": "KG", + "name": "KG" + }, + { + "value": "KH", + "name": "KH" + }, + { + "value": "KI", + "name": "KI" + }, + { + "value": "KM", + "name": "KM" + }, + { + "value": "KN", + "name": "KN" + }, + { + "value": "KP", + "name": "KP" + }, + { + "value": "KR", + "name": "KR" + }, + { + "value": "KW", + "name": "KW" + }, + { + "value": "KY", + "name": "KY" + }, + { + "value": "KZ", + "name": "KZ" + }, + { + "value": "LA", + "name": "LA" + }, + { + "value": "LB", + "name": "LB" + }, + { + "value": "LC", + "name": "LC" + }, + { + "value": "LI", + "name": "LI" + }, + { + "value": "LK", + "name": "LK" + }, + { + "value": "LR", + "name": "LR" + }, + { + "value": "LS", + "name": "LS" + }, + { + "value": "LT", + "name": "LT" + }, + { + "value": "LU", + "name": "LU" + }, + { + "value": "LV", + "name": "LV" + }, + { + "value": "LY", + "name": "LY" + }, + { + "value": "MA", + "name": "MA" + }, + { + "value": "MC", + "name": "MC" + }, + { + "value": "MD", + "name": "MD" + }, + { + "value": "ME", + "name": "ME" + }, + { + "value": "MF", + "name": "MF" + }, + { + "value": "MG", + "name": "MG" + }, + { + "value": "MH", + "name": "MH" + }, + { + "value": "MK", + "name": "MK" + }, + { + "value": "ML", + "name": "ML" + }, + { + "value": "MM", + "name": "MM" + }, + { + "value": "MN", + "name": "MN" + }, + { + "value": "MO", + "name": "MO" + }, + { + "value": "MP", + "name": "MP" + }, + { + "value": "MQ", + "name": "MQ" + }, + { + "value": "MR", + "name": "MR" + }, + { + "value": "MS", + "name": "MS" + }, + { + "value": "MT", + "name": "MT" + }, + { + "value": "MU", + "name": "MU" + }, + { + "value": "MV", + "name": "MV" + }, + { + "value": "MW", + "name": "MW" + }, + { + "value": "MX", + "name": "MX" + }, + { + "value": "MY", + "name": "MY" + }, + { + "value": "MZ", + "name": "MZ" + }, + { + "value": "NA", + "name": "NA" + }, + { + "value": "NC", + "name": "NC" + }, + { + "value": "NE", + "name": "NE" + }, + { + "value": "NF", + "name": "NF" + }, + { + "value": "NG", + "name": "NG" + }, + { + "value": "NI", + "name": "NI" + }, + { + "value": "NL", + "name": "NL" + }, + { + "value": "NO", + "name": "NO" + }, + { + "value": "NP", + "name": "NP" + }, + { + "value": "NR", + "name": 
"NR" + }, + { + "value": "NU", + "name": "NU" + }, + { + "value": "NZ", + "name": "NZ" + }, + { + "value": "OM", + "name": "OM" + }, + { + "value": "PA", + "name": "PA" + }, + { + "value": "PE", + "name": "PE" + }, + { + "value": "PF", + "name": "PF" + }, + { + "value": "PG", + "name": "PG" + }, + { + "value": "PH", + "name": "PH" + }, + { + "value": "PK", + "name": "PK" + }, + { + "value": "PL", + "name": "PL" + }, + { + "value": "PM", + "name": "PM" + }, + { + "value": "PN", + "name": "PN" + }, + { + "value": "PR", + "name": "PR" + }, + { + "value": "PS", + "name": "PS" + }, + { + "value": "PT", + "name": "PT" + }, + { + "value": "PW", + "name": "PW" + }, + { + "value": "PY", + "name": "PY" + }, + { + "value": "QA", + "name": "QA" + }, + { + "value": "RE", + "name": "RE" + }, + { + "value": "RO", + "name": "RO" + }, + { + "value": "RS", + "name": "RS" + }, + { + "value": "RU", + "name": "RU" + }, + { + "value": "RW", + "name": "RW" + }, + { + "value": "SA", + "name": "SA" + }, + { + "value": "SB", + "name": "SB" + }, + { + "value": "SC", + "name": "SC" + }, + { + "value": "SD", + "name": "SD" + }, + { + "value": "SE", + "name": "SE" + }, + { + "value": "SG", + "name": "SG" + }, + { + "value": "SH", + "name": "SH" + }, + { + "value": "SI", + "name": "SI" + }, + { + "value": "SJ", + "name": "SJ" + }, + { + "value": "SK", + "name": "SK" + }, + { + "value": "SL", + "name": "SL" + }, + { + "value": "SM", + "name": "SM" + }, + { + "value": "SN", + "name": "SN" + }, + { + "value": "SO", + "name": "SO" + }, + { + "value": "SR", + "name": "SR" + }, + { + "value": "SS", + "name": "SS" + }, + { + "value": "ST", + "name": "ST" + }, + { + "value": "SV", + "name": "SV" + }, + { + "value": "SX", + "name": "SX" + }, + { + "value": "SY", + "name": "SY" + }, + { + "value": "SZ", + "name": "SZ" + }, + { + "value": "TC", + "name": "TC" + }, + { + "value": "TD", + "name": "TD" + }, + { + "value": "TF", + "name": "TF" + }, + { + "value": "TG", + "name": "TG" + }, + { + "value": "TH", + "name": "TH" + }, + { + "value": "TJ", + "name": "TJ" + }, + { + "value": "TK", + "name": "TK" + }, + { + "value": "TL", + "name": "TL" + }, + { + "value": "TM", + "name": "TM" + }, + { + "value": "TN", + "name": "TN" + }, + { + "value": "TO", + "name": "TO" + }, + { + "value": "TP", + "name": "TP" + }, + { + "value": "TR", + "name": "TR" + }, + { + "value": "TT", + "name": "TT" + }, + { + "value": "TV", + "name": "TV" + }, + { + "value": "TW", + "name": "TW" + }, + { + "value": "TZ", + "name": "TZ" + }, + { + "value": "UA", + "name": "UA" + }, + { + "value": "UG", + "name": "UG" + }, + { + "value": "US", + "name": "US" + }, + { + "value": "UY", + "name": "UY" + }, + { + "value": "UZ", + "name": "UZ" + }, + { + "value": "VA", + "name": "VA" + }, + { + "value": "VC", + "name": "VC" + }, + { + "value": "VE", + "name": "VE" + }, + { + "value": "VG", + "name": "VG" + }, + { + "value": "VI", + "name": "VI" + }, + { + "value": "VN", + "name": "VN" + }, + { + "value": "VU", + "name": "VU" + }, + { + "value": "WF", + "name": "WF" + }, + { + "value": "WS", + "name": "WS" + }, + { + "value": "YE", + "name": "YE" + }, + { + "value": "YT", + "name": "YT" + }, + { + "value": "ZA", + "name": "ZA" + }, + { + "value": "ZM", + "name": "ZM" + }, + { + "value": "ZW", + "name": "ZW" } - } + ] } }, "com.amazonaws.route53domains#Currency": { @@ -2112,7 +1521,7 @@ } ], "traits": { - "smithy.api#documentation": "This operation deletes the specified domain. This action is permanent. 
For more\n\t\t\tinformation, see Deleting a domain name\n\t\t\t\tregistration.
\nTo transfer the domain registration to another registrar, use the transfer process\n\t\t\tthat’s provided by the registrar to which you want to transfer the registration.\n\t\t\tOtherwise, the following apply:
\nYou can’t get a refund for the cost of a deleted domain registration.
\nThe registry for the top-level domain might hold the domain name for a brief\n\t\t\t\t\ttime before releasing it for other users to register (varies by registry).\n\t\t\t\t
\nWhen the registration has been deleted, we'll send you a confirmation to the\n\t\t\t\t\tregistrant contact. The email will come from\n\t\t\t\t\t\tnoreply@domainnameverification.net
or\n\t\t\t\t\t\tnoreply@registrar.amazon.com
.
This operation deletes the specified domain. This action is permanent. For more information,\n\t\t\tsee Deleting a domain name registration.
\n\t\tTo transfer the domain registration to another registrar, use the transfer process that’s\n\t\t\tprovided by the registrar to which you want to transfer the registration. Otherwise,\n\t\t\tthe following apply:
\n\t\tYou can’t get a refund for the cost of a deleted domain registration.
\nThe registry for the top-level domain might hold the domain name for a brief time before releasing it for other users to register (varies by registry).
\nWhen the registration has been deleted, we'll send you a confirmation to the registrant\n\t\t\t\tcontact. The email will come from\n\t\t\t\tnoreply@domainnameverification.net
or\n\t\t\t\tnoreply@registrar.amazon.com
.
Identifier for tracking the progress of the request. To query the operation status,\n\t\t\tuse GetOperationDetail.
" + "smithy.api#documentation": "Identifier for tracking the progress of the request. \n\t\t\tTo query the operation status, use GetOperationDetail.
" } } } @@ -2158,7 +1567,7 @@ } ], "traits": { - "smithy.api#documentation": "This operation deletes the specified tags for a domain.
\nAll tag operations are eventually consistent; subsequent operations might not\n\t\t\timmediately represent all issued operations.
" + "smithy.api#documentation": "This operation deletes the specified tags for a domain.
\n\t\tAll tag operations are eventually consistent; subsequent operations might not immediately represent all issued operations.
" } }, "com.amazonaws.route53domains#DeleteTagsForDomainRequest": { @@ -2204,7 +1613,7 @@ } ], "traits": { - "smithy.api#documentation": "This operation disables automatic renewal of domain registration for the specified\n\t\t\tdomain.
" + "smithy.api#documentation": "This operation disables automatic renewal of domain registration for the specified domain.
" } }, "com.amazonaws.route53domains#DisableDomainAutoRenewRequest": { @@ -2249,7 +1658,7 @@ } ], "traits": { - "smithy.api#documentation": "This operation removes the transfer lock on the domain (specifically the\n\t\t\t\tclientTransferProhibited
status) to allow domain transfers. We\n\t\t\trecommend you refrain from performing this action unless you intend to transfer the\n\t\t\tdomain to a different registrar. Successful submission returns an operation ID that you\n\t\t\tcan use to track the progress and completion of the action. If the request is not\n\t\t\tcompleted successfully, the domain registrant will be notified by email.
This operation removes the transfer lock on the domain (specifically the\n\t\t\tclientTransferProhibited
status) to allow domain transfers. We recommend\n\t\t\tyou refrain from performing this action unless you intend to transfer the domain to a\n\t\t\tdifferent registrar. Successful submission returns an operation ID that you can use to track\n\t\t\tthe progress and completion of the action. If the request is not completed successfully, the\n\t\t\tdomain registrant will be notified by email.
Identifier for tracking the progress of the request. To query the operation status,\n\t\t\tuse GetOperationDetail.
" - } - } - }, - "traits": { - "smithy.api#documentation": "The DisableDomainTransferLock response includes the following element.
" - } - }, - "com.amazonaws.route53domains#DisassociateDelegationSignerFromDomain": { - "type": "operation", - "input": { - "target": "com.amazonaws.route53domains#DisassociateDelegationSignerFromDomainRequest" - }, - "output": { - "target": "com.amazonaws.route53domains#DisassociateDelegationSignerFromDomainResponse" - }, - "errors": [ - { - "target": "com.amazonaws.route53domains#DuplicateRequest" - }, - { - "target": "com.amazonaws.route53domains#InvalidInput" - }, - { - "target": "com.amazonaws.route53domains#OperationLimitExceeded" - }, - { - "target": "com.amazonaws.route53domains#TLDRulesViolation" - }, - { - "target": "com.amazonaws.route53domains#UnsupportedTLD" - } - ], - "traits": { - "smithy.api#documentation": "Deletes a delegation signer (DS) record in the registry zone for this domain\n\t\t\tname.
" - } - }, - "com.amazonaws.route53domains#DisassociateDelegationSignerFromDomainRequest": { - "type": "structure", - "members": { - "DomainName": { - "target": "com.amazonaws.route53domains#DomainName", - "traits": { - "smithy.api#documentation": "Name of the domain.
", + "smithy.api#documentation": "Identifier for tracking the progress of the request. To query the operation status, use \n\t\t\tGetOperationDetail.
", "smithy.api#required": {} } - }, - "Id": { - "target": "com.amazonaws.route53domains#String", - "traits": { - "smithy.api#documentation": "An internal identification number assigned to each DS record after it’s created. You\n\t\t\tcan retrieve it as part of DNSSEC information returned by GetDomainDetail.
", - "smithy.api#required": {} - } - } - } - }, - "com.amazonaws.route53domains#DisassociateDelegationSignerFromDomainResponse": { - "type": "structure", - "members": { - "OperationId": { - "target": "com.amazonaws.route53domains#OperationId", - "traits": { - "smithy.api#documentation": "Identifier for tracking the progress of the request. To query the operation status,\n\t\t\tuse GetOperationDetail.
" - } - } - } - }, - "com.amazonaws.route53domains#DnssecKey": { - "type": "structure", - "members": { - "Algorithm": { - "target": "com.amazonaws.route53domains#NullableInteger", - "traits": { - "smithy.api#documentation": "The number of the public key’s cryptographic algorithm according to an IANA assignment.
\nIf Route 53 is your DNS service, set this to 13.
\nFor more information about enabling DNSSEC signing, see Enabling DNSSEC signing and establishing a chain of trust.
" - } - }, - "Flags": { - "target": "com.amazonaws.route53domains#NullableInteger", - "traits": { - "smithy.api#documentation": "Defines the type of key. It can be either a KSK (key-signing-key, value 257) or ZSK\n\t\t\t(zone-signing-key, value 256). Using KSK is always encouraged. Only use ZSK if your DNS\n\t\t\tprovider isn't Route 53 and you don’t have KSK available.
\nIf you have KSK and ZSK keys, always use KSK to create a delegations signer (DS)\n\t\t\trecord. If you have ZSK keys only – use ZSK to create a DS record.
" - } - }, - "PublicKey": { - "target": "com.amazonaws.route53domains#DnssecPublicKey", - "traits": { - "smithy.api#documentation": "The base64-encoded public key part of the key pair that is passed to the registry\n\t\t\t.
" - } - }, - "DigestType": { - "target": "com.amazonaws.route53domains#NullableInteger", - "traits": { - "smithy.api#documentation": "The number of the DS digest algorithm according to an IANA assignment.
\nFor more information, see IANA\n\t\t\tfor DNSSEC Delegation Signer (DS) Resource Record (RR) Type Digest Algorithms.
" - } - }, - "Digest": { - "target": "com.amazonaws.route53domains#String", - "traits": { - "smithy.api#documentation": "The delegation signer digest.
\nDigest is calculated from the public key provided using specified digest algorithm and\n\t\t\tthis digest is the actual value returned from the registry nameservers as the value of\n\t\t\tDS records.
" - } - }, - "KeyTag": { - "target": "com.amazonaws.route53domains#NullableInteger", - "traits": { - "smithy.api#documentation": "A numeric identification of the DNSKEY record referred to by this DS record.
" - } - }, - "Id": { - "target": "com.amazonaws.route53domains#String", - "traits": { - "smithy.api#documentation": "An ID assigned to each DS record created by AssociateDelegationSignerToDomain.
" - } - } - }, - "traits": { - "smithy.api#documentation": "Information about the DNSSEC key.
\nYou get this from your DNS provider and then give it to Route 53 (by using\n\t\t\t\tAssociateDelegationSignerToDomain) to pass it to the registry to establish\n\t\t\tthe chain of trust.
" - } - }, - "com.amazonaws.route53domains#DnssecKeyList": { - "type": "list", - "member": { - "target": "com.amazonaws.route53domains#DnssecKey" - } - }, - "com.amazonaws.route53domains#DnssecLimitExceeded": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.route53domains#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": " This error is returned if you call AssociateDelegationSignerToDomain
\n\t\t\twhen the specified domain has reached the maximum number of DS records. You can't add\n\t\t\tany additional DS records unless you delete an existing one first.
Algorithm which was used to generate the digest from the public key.
" - } - }, - "Flags": { - "target": "com.amazonaws.route53domains#NullableInteger", - "traits": { - "smithy.api#documentation": "Defines the type of key. It can be either a KSK (key-signing-key, value 257) or ZSK\n\t\t\t(zone-signing-key, value 256). Using KSK is always encouraged. Only use ZSK if your DNS\n\t\t\tprovider isn't Route 53 and you don’t have KSK available.
\nIf you have KSK and ZSK keys, always use KSK to create a delegations signer (DS)\n\t\t\trecord. If you have ZSK keys only – use ZSK to create a DS record.
" - } - }, - "PublicKey": { - "target": "com.amazonaws.route53domains#DnssecPublicKey", - "traits": { - "smithy.api#documentation": "The base64-encoded public key part of the key pair that is passed to the registry.\n\t\t
" - } } }, "traits": { - "smithy.api#documentation": "Information about a delegation signer (DS) record that was created in the registry by\n\t\t\t\tAssociateDelegationSignerToDomain.
" + "smithy.api#documentation": "The DisableDomainTransferLock response includes the following element.
" } }, "com.amazonaws.route53domains#DomainAuthCode": { @@ -2455,56 +1702,42 @@ } }, "com.amazonaws.route53domains#DomainAvailability": { - "type": "enum", - "members": { - "AVAILABLE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AVAILABLE" - } - }, - "AVAILABLE_RESERVED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AVAILABLE_RESERVED" - } - }, - "AVAILABLE_PREORDER": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "AVAILABLE_PREORDER" - } - }, - "UNAVAILABLE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UNAVAILABLE" - } - }, - "UNAVAILABLE_PREMIUM": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UNAVAILABLE_PREMIUM" - } - }, - "UNAVAILABLE_RESTRICTED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UNAVAILABLE_RESTRICTED" - } - }, - "RESERVED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "RESERVED" - } - }, - "DONT_KNOW": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DONT_KNOW" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "AVAILABLE", + "name": "AVAILABLE" + }, + { + "value": "AVAILABLE_RESERVED", + "name": "AVAILABLE_RESERVED" + }, + { + "value": "AVAILABLE_PREORDER", + "name": "AVAILABLE_PREORDER" + }, + { + "value": "UNAVAILABLE", + "name": "UNAVAILABLE" + }, + { + "value": "UNAVAILABLE_PREMIUM", + "name": "UNAVAILABLE_PREMIUM" + }, + { + "value": "UNAVAILABLE_RESTRICTED", + "name": "UNAVAILABLE_RESTRICTED" + }, + { + "value": "RESERVED", + "name": "RESERVED" + }, + { + "value": "DONT_KNOW", + "name": "DONT_KNOW" } - } + ] } }, "com.amazonaws.route53domains#DomainLimitExceeded": { @@ -2612,7 +1845,7 @@ "Availability": { "target": "com.amazonaws.route53domains#String", "traits": { - "smithy.api#documentation": "Whether the domain name is available for registering.
\nYou can register only the domains that are designated as\n\t\t\t\tAVAILABLE
.
Valid values:
\nThe domain name is available.
\nThe domain name is reserved under specific conditions.
\nThe domain name is available and can be preordered.
\nThe TLD registry didn't reply with a definitive answer about whether the\n\t\t\t\t\t\tdomain name is available. Route 53 can return this response for a variety of\n\t\t\t\t\t\treasons, for example, the registry is performing maintenance. Try again\n\t\t\t\t\t\tlater.
\nThe TLD registry didn't return a response in the expected amount of time.\n\t\t\t\t\t\tWhen the response is delayed, it usually takes just a few extra seconds. You\n\t\t\t\t\t\tcan resubmit the request immediately.
\nThe domain name has been reserved for another person or\n\t\t\t\t\t\torganization.
\nThe domain name is not available.
\nThe domain name is not available.
\nThe domain name is forbidden.
\nWhether the domain name is available for registering.
\n\t\tYou can register only the domains that are designated as AVAILABLE
.
Valid values:
\n\t\tThe domain name is available.
\nThe domain name is reserved under specific conditions.
\nThe domain name is available and can be preordered.
\nThe TLD registry didn't reply with a definitive answer about whether the domain name is available. \n\t\t\t\t\tRoute 53 can return this response for a variety of reasons, for example, the registry is performing maintenance. \n\t\t\t\t\tTry again later.
\nThe TLD registry didn't return a response in the expected amount of time. When the response is delayed, \n\t\t\t\t\tit usually takes just a few extra seconds. You can resubmit the request immediately.
\nThe domain name has been reserved for another person or organization.
\nThe domain name is not available.
\nThe domain name is not available.
\nThe domain name is forbidden.
\nThe name of the domain that the summary information applies to.
" + "smithy.api#documentation": "The name of the domain that the summary information applies to.
", + "smithy.api#required": {} } }, "AutoRenew": { @@ -2644,13 +1878,13 @@ "TransferLock": { "target": "com.amazonaws.route53domains#Boolean", "traits": { - "smithy.api#documentation": "Indicates whether a domain is locked from unauthorized transfer to another\n\t\t\tparty.
" + "smithy.api#documentation": "Indicates whether a domain is locked from unauthorized transfer to another party.
" } }, "Expiry": { "target": "com.amazonaws.route53domains#Timestamp", "traits": { - "smithy.api#documentation": "Expiration date of the domain in Unix time format and Coordinated Universal Time\n\t\t\t(UTC).
" + "smithy.api#documentation": "Expiration date of the domain in Unix time format and Coordinated Universal Time (UTC).
" } } }, @@ -2672,7 +1906,7 @@ } }, "traits": { - "smithy.api#documentation": "A complex type that contains information about whether the specified domain can be\n\t\t\ttransferred to Route 53.
" + "smithy.api#documentation": "A complex type that contains information about whether the specified domain can be transferred to Route 53.
" } }, "com.amazonaws.route53domains#DuplicateRequest": { @@ -2729,7 +1963,7 @@ } ], "traits": { - "smithy.api#documentation": "This operation configures Amazon Route 53 to automatically renew the specified domain\n\t\t\tbefore the domain registration expires. The cost of renewing your domain registration is\n\t\t\tbilled to your Amazon Web Services account.
\nThe period during which you can renew a domain name varies by TLD. For a list of TLDs\n\t\t\tand their renewal policies, see Domains That You Can\n\t\t\t\tRegister with Amazon Route 53 in the Amazon Route 53 Developer\n\t\t\t\tGuide. Route 53 requires that you renew before the end of the renewal\n\t\t\tperiod so we can complete processing before the deadline.
" + "smithy.api#documentation": "This operation configures Amazon Route 53 to automatically renew the specified domain before the domain registration expires. \n\t\t\tThe cost of renewing your domain registration is billed to your Amazon Web Services account.
\n\t\tThe period during which you can renew a domain name varies by TLD. For a list of TLDs and their renewal policies, see \n\t\t\tDomains That You Can Register with Amazon Route 53 \n\t\t\tin the Amazon Route 53 Developer Guide. Route 53 requires that you renew before the end of the renewal period \n\t\t\tso we can complete processing before the deadline.
" } }, "com.amazonaws.route53domains#EnableDomainAutoRenewRequest": { @@ -2774,7 +2008,7 @@ } ], "traits": { - "smithy.api#documentation": "This operation sets the transfer lock on the domain (specifically the\n\t\t\t\tclientTransferProhibited
status) to prevent domain transfers.\n\t\t\tSuccessful submission returns an operation ID that you can use to track the progress and\n\t\t\tcompletion of the action. If the request is not completed successfully, the domain\n\t\t\tregistrant will be notified by email.
This operation sets the transfer lock on the domain (specifically the clientTransferProhibited
status) \n\t\t\tto prevent domain transfers. Successful submission returns an operation ID that you can use to track the progress and \n\t\t\tcompletion of the action. If the request is not completed successfully, the domain registrant will be notified by email.
Identifier for tracking the progress of the request. To use this ID to query the\n\t\t\toperation status, use GetOperationDetail.
" + "smithy.api#documentation": "Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.
", + "smithy.api#required": {} } } }, @@ -2815,7 +2050,7 @@ "Name": { "target": "com.amazonaws.route53domains#ExtraParamName", "traits": { - "smithy.api#documentation": "The name of an additional parameter that is required by a top-level domain. Here are\n\t\t\tthe top-level domains that require additional parameters and the names of the parameters\n\t\t\tthat they require:
\n\n AU_ID_NUMBER
\n
\n AU_ID_TYPE
\n
Valid values include the following:
\n\n ABN
(Australian business number)
\n ACN
(Australian company number)
\n TM
(Trademark number)
\n BRAND_NUMBER
\n
\n CA_BUSINESS_ENTITY_TYPE
\n
Valid values include the following:
\n\n BANK
(Bank)
\n COMMERCIAL_COMPANY
(Commercial\n\t\t\t\t\t\t\t\t\t\tcompany)
\n COMPANY
(Company)
\n COOPERATION
(Cooperation)
\n COOPERATIVE
(Cooperative)
\n COOPRIX
(Cooprix)
\n CORP
(Corporation)
\n CREDIT_UNION
(Credit union)
\n FOMIA
(Federation of mutual insurance\n\t\t\t\t\t\t\t\t\t\tassociations)
\n INC
(Incorporated)
\n LTD
(Limited)
\n LTEE
(Limitée)
\n LLC
(Limited liability corporation)
\n LLP
(Limited liability partnership)
\n LTE
(Lte.)
\n MBA
(Mutual benefit association)
\n MIC
(Mutual insurance company)
\n NFP
(Not-for-profit corporation)
\n SA
(S.A.)
\n SAVINGS_COMPANY
(Savings company)
\n SAVINGS_UNION
(Savings union)
\n SARL
(Société à responsabilité\n\t\t\t\t\t\t\t\t\t\tlimitée)
\n TRUST
(Trust)
\n ULC
(Unlimited liability corporation)
\n CA_LEGAL_TYPE
\n
When ContactType
is PERSON
, valid values\n\t\t\t\t\t\t\t\tinclude the following:
\n ABO
(Aboriginal Peoples indigenous to\n\t\t\t\t\t\t\t\t\t\tCanada)
\n CCT
(Canadian citizen)
\n LGR
(Legal Representative of a Canadian\n\t\t\t\t\t\t\t\t\t\tCitizen or Permanent Resident)
\n RES
(Permanent resident of Canada)
When ContactType
is a value other than\n\t\t\t\t\t\t\t\t\tPERSON
, valid values include the following:
\n ASS
(Canadian unincorporated\n\t\t\t\t\t\t\t\t\t\tassociation)
\n CCO
(Canadian corporation)
\n EDU
(Canadian educational institution)
\n GOV
(Government or government entity in\n\t\t\t\t\t\t\t\t\t\tCanada)
\n HOP
(Canadian Hospital)
\n INB
(Indian Band recognized by the Indian Act\n\t\t\t\t\t\t\t\t\t\tof Canada)
\n LAM
(Canadian Library, Archive, or\n\t\t\t\t\t\t\t\t\t\tMuseum)
\n MAJ
(Her/His Majesty the Queen/King)
\n OMK
(Official mark registered in\n\t\t\t\t\t\t\t\t\t\tCanada)
\n PLT
(Canadian Political Party)
\n PRT
(Partnership Registered in Canada)
\n TDM
(Trademark registered in Canada)
\n TRD
(Canadian Trade Union)
\n TRS
(Trust established in Canada)
\n ES_IDENTIFICATION
\n
The value of ES_IDENTIFICATION
depends on the\n\t\t\t\t\t\t\t\tfollowing values:
The value of ES_LEGAL_FORM
\n
The value of ES_IDENTIFICATION_TYPE
\n
\n If ES_LEGAL_FORM
is any value\n\t\t\t\t\t\t\t\t\tother than INDIVIDUAL
:\n
Specify 1 letter + 8 numbers (CIF [Certificado de\n\t\t\t\t\t\t\t\t\t\tIdentificación Fiscal])
\nExample: B12345678
\n\n If ES_LEGAL_FORM
is\n\t\t\t\t\t\t\t\t\t\tINDIVIDUAL
, the value that you specify for\n\t\t\t\t\t\t\t\t\t\tES_IDENTIFICATION
depends on the value of\n\t\t\t\t\t\t\t\t\t\tES_IDENTIFICATION_TYPE
:\n
If ES_IDENTIFICATION_TYPE
is\n\t\t\t\t\t\t\t\t\t\t\tDNI_AND_NIF
(for Spanish contacts):
Specify 8 numbers + 1 letter (DNI [Documento\n\t\t\t\t\t\t\t\t\t\t\t\tNacional de Identidad], NIF [Número de\n\t\t\t\t\t\t\t\t\t\t\t\tIdentificación Fiscal])
\nExample: 12345678M
\nIf ES_IDENTIFICATION_TYPE
is NIE
\n\t\t\t\t\t\t\t\t\t\t(for foreigners with legal residence):
Specify 1 letter + 7 numbers + 1 letter ( NIE\n\t\t\t\t\t\t\t\t\t\t\t\t[Número de Identidad de Extranjero])
\nExample: Y1234567X
\nIf ES_IDENTIFICATION_TYPE
is\n\t\t\t\t\t\t\t\t\t\t\tOTHER
(for contacts outside of\n\t\t\t\t\t\t\t\t\t\tSpain):
Specify a passport number, drivers license number,\n\t\t\t\t\t\t\t\t\t\t\t\tor national identity card number
\n\n ES_IDENTIFICATION_TYPE
\n
Valid values include the following:
\n\n DNI_AND_NIF
(For Spanish contacts)
\n NIE
(For foreigners with legal\n\t\t\t\t\t\t\t\t\t\tresidence)
\n OTHER
(For contacts outside of Spain)
\n ES_LEGAL_FORM
\n
Valid values include the following:
\n\n ASSOCIATION
\n
\n CENTRAL_GOVERNMENT_BODY
\n
\n CIVIL_SOCIETY
\n
\n COMMUNITY_OF_OWNERS
\n
\n COMMUNITY_PROPERTY
\n
\n CONSULATE
\n
\n COOPERATIVE
\n
\n DESIGNATION_OF_ORIGIN_SUPERVISORY_COUNCIL
\n
\n ECONOMIC_INTEREST_GROUP
\n
\n EMBASSY
\n
\n ENTITY_MANAGING_NATURAL_AREAS
\n
\n FARM_PARTNERSHIP
\n
\n FOUNDATION
\n
\n GENERAL_AND_LIMITED_PARTNERSHIP
\n
\n GENERAL_PARTNERSHIP
\n
\n INDIVIDUAL
\n
\n LIMITED_COMPANY
\n
\n LOCAL_AUTHORITY
\n
\n LOCAL_PUBLIC_ENTITY
\n
\n MUTUAL_INSURANCE_COMPANY
\n
\n NATIONAL_PUBLIC_ENTITY
\n
\n ORDER_OR_RELIGIOUS_INSTITUTION
\n
\n OTHERS (Only for contacts outside of\n\t\t\t\t\t\t\t\t\t\tSpain)
\n
\n POLITICAL_PARTY
\n
\n PROFESSIONAL_ASSOCIATION
\n
\n PUBLIC_LAW_ASSOCIATION
\n
\n PUBLIC_LIMITED_COMPANY
\n
\n REGIONAL_GOVERNMENT_BODY
\n
\n REGIONAL_PUBLIC_ENTITY
\n
\n SAVINGS_BANK
\n
\n SPANISH_OFFICE
\n
\n SPORTS_ASSOCIATION
\n
\n SPORTS_FEDERATION
\n
\n SPORTS_LIMITED_COMPANY
\n
\n TEMPORARY_ALLIANCE_OF_ENTERPRISES
\n
\n TRADE_UNION
\n
\n WORKER_OWNED_COMPANY
\n
\n WORKER_OWNED_LIMITED_COMPANY
\n
\n EU_COUNTRY_OF_CITIZENSHIP
\n
\n BIRTH_DATE_IN_YYYY_MM_DD
\n
\n FI_BUSINESS_NUMBER
\n
\n FI_ID_NUMBER
\n
\n FI_NATIONALITY
\n
Valid values include the following:
\n\n FINNISH
\n
\n NOT_FINNISH
\n
\n FI_ORGANIZATION_TYPE
\n
Valid values include the following:
\n\n COMPANY
\n
\n CORPORATION
\n
\n GOVERNMENT
\n
\n INSTITUTION
\n
\n POLITICAL_PARTY
\n
\n PUBLIC_COMMUNITY
\n
\n TOWNSHIP
\n
\n BIRTH_CITY
\n
\n BIRTH_COUNTRY
\n
\n BIRTH_DATE_IN_YYYY_MM_DD
\n
\n BIRTH_DEPARTMENT
: Specify the INSEE code that\n\t\t\t\t\t\t\t\tcorresponds with the department where the contact was born. If the\n\t\t\t\t\t\t\t\tcontact was born somewhere other than France or its overseas\n\t\t\t\t\t\t\t\tdepartments, specify 99
. For more information,\n\t\t\t\t\t\t\t\tincluding a list of departments and the corresponding INSEE numbers,\n\t\t\t\t\t\t\t\tsee the Wikipedia entry Departments of France.
\n BRAND_NUMBER
\n
\n IT_NATIONALITY
\n
\n IT_PIN
\n
\n IT_REGISTRANT_ENTITY_TYPE
\n
Valid values include the following:
\n\n FOREIGNERS
\n
\n FREELANCE_WORKERS
(Freelance workers and\n\t\t\t\t\t\t\t\t\t\tprofessionals)
\n ITALIAN_COMPANIES
(Italian companies and\n\t\t\t\t\t\t\t\t\t\tone-person companies)
\n NON_PROFIT_ORGANIZATIONS
\n
\n OTHER_SUBJECTS
\n
\n PUBLIC_ORGANIZATIONS
\n
\n BIRTH_DATE_IN_YYYY_MM_DD
\n
\n RU_PASSPORT_DATA
\n
\n BIRTH_COUNTRY
\n
\n SE_ID_NUMBER
\n
\n SG_ID_NUMBER
\n
\n UK_CONTACT_TYPE
\n
Valid values include the following:
\n\n CRC
(UK Corporation by Royal Charter)
\n FCORP
(Non-UK Corporation)
\n FIND
(Non-UK Individual, representing\n\t\t\t\t\t\t\t\t\t\tself)
\n FOTHER
(Non-UK Entity that does not fit into\n\t\t\t\t\t\t\t\t\t\tany other category)
\n GOV
(UK Government Body)
\n IND
(UK Individual (representing\n\t\t\t\t\t\t\t\t\t\tself))
\n IP
(UK Industrial/Provident Registered\n\t\t\t\t\t\t\t\t\t\tCompany)
\n LLP
(UK Limited Liability Partnership)
\n LTD
(UK Limited Company)
\n OTHER
(UK Entity that does not fit into any\n\t\t\t\t\t\t\t\t\t\tother category)
\n PLC
(UK Public Limited Company)
\n PTNR
(UK Partnership)
\n RCHAR
(UK Registered Charity)
\n SCH
(UK School)
\n STAT
(UK Statutory Body)
\n STRA
(UK Sole Trader)
\n UK_COMPANY_NUMBER
\n
In addition, many TLDs require a VAT_NUMBER
.
The name of an additional parameter that is required by a top-level domain. Here are the top-level domains that require \n\t\t\tadditional parameters and the names of the parameters that they require:
\n\n\t\t\n AU_ID_NUMBER
\n
\n AU_ID_TYPE
\n
Valid values include the following:
\n\t\t\t\t\t\t\t\n ABN
(Australian business number)
\n ACN
(Australian company number)
\n TM
(Trademark number)
\n BRAND_NUMBER
\n
\n CA_BUSINESS_ENTITY_TYPE
\n
Valid values include the following:
\n\t\t\t\t\t\t\t\n BANK
(Bank)
\n COMMERCIAL_COMPANY
(Commercial company)
\n COMPANY
(Company)
\n COOPERATION
(Cooperation)
\n COOPERATIVE
(Cooperative)
\n COOPRIX
(Cooprix)
\n CORP
(Corporation)
\n CREDIT_UNION
(Credit union)
\n FOMIA
(Federation of mutual insurance associations)
\n INC
(Incorporated)
\n LTD
(Limited)
\n LTEE
(Limitée)
\n LLC
(Limited liability corporation)
\n LLP
(Limited liability partnership)
\n LTE
(Lte.)
\n MBA
(Mutual benefit association)
\n MIC
(Mutual insurance company)
\n NFP
(Not-for-profit corporation)
\n SA
(S.A.)
\n SAVINGS_COMPANY
(Savings company)
\n SAVINGS_UNION
(Savings union)
\n SARL
(Société à responsabilité limitée)
\n TRUST
(Trust)
\n ULC
(Unlimited liability corporation)
\n CA_LEGAL_TYPE
\n
When ContactType
is PERSON
, valid values include the following:
\n ABO
(Aboriginal Peoples indigenous to Canada)
\n CCT
(Canadian citizen)
\n LGR
(Legal Representative of a Canadian Citizen or Permanent Resident)
\n RES
(Permanent resident of Canada)
When ContactType
is a value other than PERSON
, valid values include the following:
\n ASS
(Canadian unincorporated association)
\n CCO
(Canadian corporation)
\n EDU
(Canadian educational institution)
\n GOV
(Government or government entity in Canada)
\n HOP
(Canadian Hospital)
\n INB
(Indian Band recognized by the Indian Act of Canada)
\n LAM
(Canadian Library, Archive, or Museum)
\n MAJ
(Her/His Majesty the Queen/King)
\n OMK
(Official mark registered in Canada)
\n PLT
(Canadian Political Party)
\n PRT
(Partnership Registered in Canada)
\n TDM
(Trademark registered in Canada)
\n TRD
(Canadian Trade Union)
\n TRS
(Trust established in Canada)
\n ES_IDENTIFICATION
\n
The value of ES_IDENTIFICATION
depends on the following values:
The value of ES_LEGAL_FORM
\n
The value of ES_IDENTIFICATION_TYPE
\n
\n If ES_LEGAL_FORM
is any value other than INDIVIDUAL
:\n
Specify 1 letter + 8 numbers (CIF [Certificado de Identificación Fiscal])
\nExample: B12345678
\n\n If ES_LEGAL_FORM
is INDIVIDUAL
, the value that you specify for \n\t\t\t\t\t\t\t\tES_IDENTIFICATION
depends on the value of ES_IDENTIFICATION_TYPE
:\n
If ES_IDENTIFICATION_TYPE
is DNI_AND_NIF
(for Spanish contacts):
Specify 8 numbers + 1 letter (DNI [Documento Nacional de Identidad], NIF [Número de Identificación Fiscal])
\nExample: 12345678M
\nIf ES_IDENTIFICATION_TYPE
is NIE
(for foreigners with legal residence):
Specify 1 letter + 7 numbers + 1 letter ( NIE [Número de Identidad de Extranjero])
\nExample: Y1234567X
\nIf ES_IDENTIFICATION_TYPE
is OTHER
(for contacts outside of Spain):
Specify a passport number, drivers license number, or national identity card number
\n\n ES_IDENTIFICATION_TYPE
\n
Valid values include the following:
\n\t\t\t\t\t\t\t\n DNI_AND_NIF
(For Spanish contacts)
\n NIE
(For foreigners with legal residence)
\n OTHER
(For contacts outside of Spain)
\n ES_LEGAL_FORM
\n
Valid values include the following:
\n\t\t\t\t\t\t\t\n ASSOCIATION
\n
\n CENTRAL_GOVERNMENT_BODY
\n
\n CIVIL_SOCIETY
\n
\n COMMUNITY_OF_OWNERS
\n
\n COMMUNITY_PROPERTY
\n
\n CONSULATE
\n
\n COOPERATIVE
\n
\n DESIGNATION_OF_ORIGIN_SUPERVISORY_COUNCIL
\n
\n ECONOMIC_INTEREST_GROUP
\n
\n EMBASSY
\n
\n ENTITY_MANAGING_NATURAL_AREAS
\n
\n FARM_PARTNERSHIP
\n
\n FOUNDATION
\n
\n GENERAL_AND_LIMITED_PARTNERSHIP
\n
\n GENERAL_PARTNERSHIP
\n
\n INDIVIDUAL
\n
\n LIMITED_COMPANY
\n
\n LOCAL_AUTHORITY
\n
\n LOCAL_PUBLIC_ENTITY
\n
\n MUTUAL_INSURANCE_COMPANY
\n
\n NATIONAL_PUBLIC_ENTITY
\n
\n ORDER_OR_RELIGIOUS_INSTITUTION
\n
\n OTHERS (Only for contacts outside of Spain)
\n
\n POLITICAL_PARTY
\n
\n PROFESSIONAL_ASSOCIATION
\n
\n PUBLIC_LAW_ASSOCIATION
\n
\n PUBLIC_LIMITED_COMPANY
\n
\n REGIONAL_GOVERNMENT_BODY
\n
\n REGIONAL_PUBLIC_ENTITY
\n
\n SAVINGS_BANK
\n
\n SPANISH_OFFICE
\n
\n SPORTS_ASSOCIATION
\n
\n SPORTS_FEDERATION
\n
\n SPORTS_LIMITED_COMPANY
\n
\n TEMPORARY_ALLIANCE_OF_ENTERPRISES
\n
\n TRADE_UNION
\n
\n WORKER_OWNED_COMPANY
\n
\n WORKER_OWNED_LIMITED_COMPANY
\n
\n EU_COUNTRY_OF_CITIZENSHIP
\n
\n BIRTH_DATE_IN_YYYY_MM_DD
\n
\n FI_BUSINESS_NUMBER
\n
\n FI_ID_NUMBER
\n
\n FI_NATIONALITY
\n
Valid values include the following:
\n\t\t\t\t\t\t\t\n FINNISH
\n
\n NOT_FINNISH
\n
\n FI_ORGANIZATION_TYPE
\n
Valid values include the following:
\n\t\t\t\t\t\t\t\n COMPANY
\n
\n CORPORATION
\n
\n GOVERNMENT
\n
\n INSTITUTION
\n
\n POLITICAL_PARTY
\n
\n PUBLIC_COMMUNITY
\n
\n TOWNSHIP
\n
\n BIRTH_CITY
\n
\n BIRTH_COUNTRY
\n
\n BIRTH_DATE_IN_YYYY_MM_DD
\n
\n BIRTH_DEPARTMENT
: Specify the INSEE code that corresponds with the department where the contact was born. \n\t\t\t\t\t\t\tIf the contact was born somewhere other than France or its overseas departments, specify 99
. For more information, \n\t\t\t\t\t\t\tincluding a list of departments and the corresponding INSEE numbers, see the Wikipedia entry \n\t\t\t\t\t\t\tDepartments of France.
\n BRAND_NUMBER
\n
\n IT_NATIONALITY
\n
\n IT_PIN
\n
\n IT_REGISTRANT_ENTITY_TYPE
\n
Valid values include the following:
\n\t\t\t\t\t\t\t\n FOREIGNERS
\n
\n FREELANCE_WORKERS
(Freelance workers and professionals)
\n ITALIAN_COMPANIES
(Italian companies and one-person companies)
\n NON_PROFIT_ORGANIZATIONS
\n
\n OTHER_SUBJECTS
\n
\n PUBLIC_ORGANIZATIONS
\n
\n BIRTH_DATE_IN_YYYY_MM_DD
\n
\n RU_PASSPORT_DATA
\n
\n BIRTH_COUNTRY
\n
\n SE_ID_NUMBER
\n
\n SG_ID_NUMBER
\n
\n UK_CONTACT_TYPE
\n
Valid values include the following:
\n\t\t\t\t\t\t\t\n CRC
(UK Corporation by Royal Charter)
\n FCORP
(Non-UK Corporation)
\n FIND
(Non-UK Individual, representing self)
\n FOTHER
(Non-UK Entity that does not fit into any other category)
\n GOV
(UK Government Body)
\n IND
(UK Individual (representing self))
\n IP
(UK Industrial/Provident Registered Company)
\n LLP
(UK Limited Liability Partnership)
\n LTD
(UK Limited Company)
\n OTHER
(UK Entity that does not fit into any other category)
\n PLC
(UK Public Limited Company)
\n PTNR
(UK Partnership)
\n RCHAR
(UK Registered Charity)
\n SCH
(UK School)
\n STAT
(UK Statutory Body)
\n STRA
(UK Sole Trader)
\n UK_COMPANY_NUMBER
\n
In addition, many TLDs require a VAT_NUMBER
.
The operator values for filtering domain names. The values can be:
\n\n LE
: Less than, or equal to
\n GE
: Greater than, or equal to
\n BEGINS_WITH
: Begins with
The operator values for filtering domain names. The values can be:
\n\t\t\n LE
: Less than, or equal to
\n GE
: Greater than, or equal to
\n BEGINS_WITH
: Begins with
An array of strings presenting values to compare. Only 1 item in the list is\n\t\t\tcurrently supported.
", + "smithy.api#documentation": "An array of strings presenting values to compare. Only 1 item in the list is currently supported.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "Information for the filtering of a list of domains returned by ListDomains.
" + "smithy.api#documentation": "Information for the filtering of a list of domains returned by \n\t\t\tListDomains.
" } }, "com.amazonaws.route53domains#FilterConditions": { @@ -3103,7 +2274,7 @@ } ], "traits": { - "smithy.api#documentation": "For operations that require confirmation that the email address for the registrant\n\t\t\tcontact is valid, such as registering a new domain, this operation returns information\n\t\t\tabout whether the registrant contact has responded.
\nIf you want us to resend the email, use the\n\t\t\t\tResendContactReachabilityEmail
operation.
For operations that require confirmation that the email address for the registrant contact is valid, \n\t\t\tsuch as registering a new domain, this operation returns information about whether the registrant contact has responded.
\n\t\tIf you want us to resend the email, use the ResendContactReachabilityEmail
operation.
The name of the domain for which you want to know whether the registrant contact has\n\t\t\tconfirmed that the email address is valid.
" + "smithy.api#documentation": "The name of the domain for which you want to know whether the registrant contact has confirmed that the email address is valid.
" } } } @@ -3129,7 +2300,7 @@ "status": { "target": "com.amazonaws.route53domains#ReachabilityStatus", "traits": { - "smithy.api#documentation": "Whether the registrant contact has responded. Values include the following:
\nWe sent the confirmation email and haven't received a response yet.
\nWe sent the email and got confirmation from the registrant contact.
\nThe time limit expired before the registrant contact responded.
\nWhether the registrant contact has responded. Values include the following:
\n\t\tWe sent the confirmation email and haven't received a response yet.
\nWe sent the email and got confirmation from the registrant contact.
\nThe time limit expired before the registrant contact responded.
\nThis operation returns detailed information about a specified domain that is\n\t\t\tassociated with the current Amazon Web Services account. Contact information for the\n\t\t\tdomain is also returned as part of the output.
" + "smithy.api#documentation": "This operation returns detailed information about a specified domain that is associated with the current Amazon Web Services account. \n\t\t\tContact information for the domain is also returned as part of the output.
" } }, "com.amazonaws.route53domains#GetDomainDetailRequest": { @@ -3175,13 +2346,15 @@ "DomainName": { "target": "com.amazonaws.route53domains#DomainName", "traits": { - "smithy.api#documentation": "The name of a domain.
" + "smithy.api#documentation": "The name of a domain.
", + "smithy.api#required": {} } }, "Nameservers": { "target": "com.amazonaws.route53domains#NameserverList", "traits": { - "smithy.api#documentation": "The name servers of the domain.
" + "smithy.api#documentation": "The\n\t\t\tname of the domain.
", + "smithy.api#required": {} } }, "AutoRenew": { @@ -3193,49 +2366,52 @@ "AdminContact": { "target": "com.amazonaws.route53domains#ContactDetail", "traits": { - "smithy.api#documentation": "Provides details about the domain administrative contact.
" + "smithy.api#documentation": "Provides details about the domain administrative contact.
", + "smithy.api#required": {} } }, "RegistrantContact": { "target": "com.amazonaws.route53domains#ContactDetail", "traits": { - "smithy.api#documentation": "Provides details about the domain registrant.
" + "smithy.api#documentation": "Provides details about the domain registrant.
", + "smithy.api#required": {} } }, "TechContact": { "target": "com.amazonaws.route53domains#ContactDetail", "traits": { - "smithy.api#documentation": "Provides details about the domain technical contact.
" + "smithy.api#documentation": "Provides details about the domain technical contact.
", + "smithy.api#required": {} } }, "AdminPrivacy": { "target": "com.amazonaws.route53domains#Boolean", "traits": { - "smithy.api#documentation": "Specifies whether contact information is concealed from WHOIS queries. If the value is\n\t\t\t\ttrue
, WHOIS (\"who is\") queries return contact information either for\n\t\t\tAmazon Registrar (for .com, .net, and .org domains) or for our registrar associate,\n\t\t\tGandi (for all other TLDs). If the value is false
, WHOIS queries return the\n\t\t\tinformation that you entered for the admin contact.
Specifies whether contact information is concealed from WHOIS\n\t\t\tqueries. If the value is true
, WHOIS (\"who is\") queries return contact\n\t\t\tinformation either for Amazon Registrar (for .com, .net, and .org domains) or for our\n\t\t\tregistrar associate, Gandi (for all other TLDs). If the value is false
,\n\t\t\tWHOIS queries return the information that you entered for the admin contact.
Specifies whether contact information is concealed from WHOIS queries. If the value is\n\t\t\t\ttrue
, WHOIS (\"who is\") queries return contact information either for\n\t\t\tAmazon Registrar (for .com, .net, and .org domains) or for our registrar associate,\n\t\t\tGandi (for all other TLDs). If the value is false
, WHOIS queries return the\n\t\t\tinformation that you entered for the registrant contact (domain owner).
Specifies whether contact information is concealed from WHOIS queries. If the value is true
, \n\t\t\tWHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) \n\t\t\tor for our registrar associate, Gandi (for all other TLDs). If the value is false
, \n\t\t\tWHOIS queries return the information that you entered for the registrant contact (domain owner).
Specifies whether contact information is concealed from WHOIS queries. If the value is\n\t\t\t\ttrue
, WHOIS (\"who is\") queries return contact information either for\n\t\t\tAmazon Registrar (for .com, .net, and .org domains) or for our registrar associate,\n\t\t\tGandi (for all other TLDs). If the value is false
, WHOIS queries return the\n\t\t\tinformation that you entered for the technical contact.
Specifies whether contact information is concealed from WHOIS queries. If the value is true
, \n\t\t\tWHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) \n\t\t\tor for our registrar associate, Gandi (for all other TLDs). If the value is false
, \n\t\t\tWHOIS queries return the information that you entered for the technical contact.
Name of the registrar of the domain as identified in the registry. Domains with a\n\t\t\t.com, .net, or .org TLD are registered by Amazon Registrar. All other domains are\n\t\t\tregistered by our registrar associate, Gandi. The value for domains that are registered\n\t\t\tby Gandi is \"GANDI SAS\"
.
Name of the registrar of the domain as identified in the registry. Domains with a .com, .net, or .org TLD are registered by \n\t\t\tAmazon Registrar. All other domains are registered by our registrar associate, Gandi. The value for domains that are registered by \n\t\t\tGandi is \"GANDI SAS\"
.
The fully qualified name of the WHOIS server that can answer the WHOIS query for the\n\t\t\tdomain.
" + "smithy.api#documentation": "The fully qualified name of the WHOIS server that can answer the WHOIS query for the domain.
" } }, "RegistrarUrl": { @@ -3247,7 +2423,7 @@ "AbuseContactEmail": { "target": "com.amazonaws.route53domains#Email", "traits": { - "smithy.api#documentation": "Email address to contact to report incorrect contact information for a domain, to\n\t\t\treport that the domain is being used to send spam, to report that someone is\n\t\t\tcybersquatting on a domain name, or report some other type of abuse.
" + "smithy.api#documentation": "Email address to contact to report incorrect contact information for a domain, to report that the domain \n\t\t\tis being used to send spam, to report that someone is cybersquatting on a domain name, or report some other type of abuse.
" } }, "AbuseContactPhone": { @@ -3265,25 +2441,25 @@ "CreationDate": { "target": "com.amazonaws.route53domains#Timestamp", "traits": { - "smithy.api#documentation": "The date when the domain was created as found in the response to a WHOIS query. The\n\t\t\tdate and time is in Unix time format and Coordinated Universal time (UTC).
" + "smithy.api#documentation": "The date when the domain was created as found in the response to a WHOIS query. The date and time is in \n\t\t\tUnix time format and Coordinated Universal time (UTC).
" } }, "UpdatedDate": { "target": "com.amazonaws.route53domains#Timestamp", "traits": { - "smithy.api#documentation": "The last updated date of the domain as found in the response to a WHOIS query. The\n\t\t\tdate and time is in Unix time format and Coordinated Universal time (UTC).
" + "smithy.api#documentation": "The last updated date of the domain as found in the response to a WHOIS query. The date and time is in \n\t\t\tUnix time format and Coordinated Universal time (UTC).
" } }, "ExpirationDate": { "target": "com.amazonaws.route53domains#Timestamp", "traits": { - "smithy.api#documentation": "The date when the registration for the domain is set to expire. The date and time is\n\t\t\tin Unix time format and Coordinated Universal time (UTC).
" + "smithy.api#documentation": "The date when the registration for the domain is set to expire. The date and time is in \n\t\t\tUnix time format and Coordinated Universal time (UTC).
" } }, "Reseller": { "target": "com.amazonaws.route53domains#Reseller", "traits": { - "smithy.api#documentation": "Reseller of the domain. Domains registered or transferred using Route 53 domains will\n\t\t\thave \"Amazon\"
as the reseller.
Reseller of the domain. Domains registered or transferred using Route 53 domains will have \"Amazon\"
\n\t\t\tas the reseller.
An array of domain name status codes, also known as Extensible Provisioning Protocol\n\t\t\t(EPP) status codes.
\nICANN, the organization that maintains a central database of domain names, has\n\t\t\tdeveloped a set of domain name status codes that tell you the status of a variety of\n\t\t\toperations on a domain name, for example, registering a domain name, transferring a\n\t\t\tdomain name to another registrar, renewing the registration for a domain name, and so\n\t\t\ton. All registrars use this same set of status codes.
\nFor a current list of domain name status codes and an explanation of what each code\n\t\t\tmeans, go to the ICANN website and search\n\t\t\tfor epp status codes
. (Search on the ICANN website; web searches sometimes\n\t\t\treturn an old version of the document.)
A complex type that contains information about the DNSSEC configuration.
" + "smithy.api#documentation": "An array of domain name status codes, also known as Extensible Provisioning Protocol (EPP) status codes.
\n\t\tICANN, the organization that maintains a central database of domain names, has developed a set of domain name \n\t\t\tstatus codes that tell you the status of a variety of operations on a domain name, for example, registering a domain name, \n\t\t\ttransferring a domain name to another registrar, renewing the registration for a domain name, and so on. All registrars \n\t\t\tuse this same set of status codes.
\n\t\tFor a current list of domain name status codes and an explanation of what each code means, go to the\n\t\t\tICANN website and search for epp status codes
.\n\t\t\t(Search on the ICANN website; web searches sometimes return an old version of the document.)
A domain name that you want to use as the basis for a list of possible domain names.\n\t\t\tThe top-level domain (TLD), such as .com, must be a TLD that Route 53 supports. For a\n\t\t\tlist of supported TLDs, see Domains that You Can\n\t\t\t\tRegister with Amazon Route 53 in the Amazon Route 53 Developer\n\t\t\t\tGuide.
\nThe domain name can contain only the following characters:
\nLetters a through z. Domain names are not case sensitive.
\nNumbers 0 through 9.
\nHyphen (-). You can't specify a hyphen at the beginning or end of a label.\n\t\t\t\t
\nPeriod (.) to separate the labels in the name, such as the .
in\n\t\t\t\t\t\texample.com
.
Internationalized domain names are not supported for some top-level domains. To\n\t\t\tdetermine whether the TLD that you want to use supports internationalized domain names,\n\t\t\tsee Domains that You Can\n\t\t\t\tRegister with Amazon Route 53.
", + "smithy.api#documentation": "A domain name that you want to use as the basis for a list of possible domain names. The top-level domain (TLD), such as .com, \n\t\t\tmust be a TLD that Route 53 supports. For a list of supported TLDs, see \n\t\t\tDomains that You Can Register with Amazon Route 53 in the \n\t\t\tAmazon Route 53 Developer Guide.
\n\t\tThe domain name can contain only the following characters:
\n\t\tLetters a through z. Domain names are not case sensitive.
\nNumbers 0 through 9.
\nHyphen (-). You can't specify a hyphen at the beginning or end of a label.
\nPeriod (.) to separate the labels in the name, such as the .
in example.com
.
Internationalized domain names are not supported for some top-level domains. To determine whether the TLD that you want to use \n\t\t\tsupports internationalized domain names, see \n\t\t\tDomains that You Can Register with Amazon Route 53. \n\t\t\t
", "smithy.api#required": {} } }, @@ -3343,14 +2513,14 @@ "target": "com.amazonaws.route53domains#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "The number of suggested domain names that you want Route 53 to return. Specify a value\n\t\t\tbetween 1 and 50.
", + "smithy.api#documentation": "The number of suggested domain names that you want Route 53 to return. Specify a value between 1 and 50.
", "smithy.api#required": {} } }, "OnlyAvailable": { "target": "com.amazonaws.route53domains#Boolean", "traits": { - "smithy.api#documentation": "If OnlyAvailable
is true
, Route 53 returns only domain names\n\t\t\tthat are available. If OnlyAvailable
is false
, Route 53\n\t\t\treturns domain names without checking whether they're available to be registered. To\n\t\t\tdetermine whether the domain is available, you can call\n\t\t\t\tcheckDomainAvailability
for each suggestion.
If OnlyAvailable
is true
, Route 53 returns only domain names that are available. \n\t\t\tIf OnlyAvailable
is false
, Route 53 returns domain names without checking whether they're \n\t\t\tavailable to be registered. To determine whether the domain is available, you can call checkDomainAvailability
\n\t\t\tfor each suggestion.
A list of possible domain names. If you specified true
for\n\t\t\t\tOnlyAvailable
in the request, the list contains only domains that are\n\t\t\tavailable for registration.
A list of possible domain names. If you specified true
for OnlyAvailable
in the request, \n\t\t\tthe list contains only domains that are available for registration.
This operation returns the current status of an operation that is not\n\t\t\tcompleted.
" + "smithy.api#documentation": "This operation returns the current status of an operation that is not completed.
" } }, "com.amazonaws.route53domains#GetOperationDetailRequest": { @@ -3390,13 +2560,13 @@ "OperationId": { "target": "com.amazonaws.route53domains#OperationId", "traits": { - "smithy.api#documentation": "The identifier for the operation for which you want to get the status. Route 53\n\t\t\treturned the identifier in the response to the original request.
", + "smithy.api#documentation": "The identifier for the operation for which you want to get the status. Route 53 returned the identifier \n\t\t\tin the response to the original request.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "The GetOperationDetail request includes the following element.
" + "smithy.api#documentation": "The \n\t\t\tGetOperationDetail \n\t\t\trequest includes the following element.
" } }, "com.amazonaws.route53domains#GetOperationDetailResponse": { @@ -3437,18 +2607,6 @@ "traits": { "smithy.api#documentation": "The date when the request was submitted.
" } - }, - "LastUpdatedDate": { - "target": "com.amazonaws.route53domains#Timestamp", - "traits": { - "smithy.api#documentation": "The date when the operation was last updated.
" - } - }, - "StatusFlag": { - "target": "com.amazonaws.route53domains#StatusFlag", - "traits": { - "smithy.api#documentation": "Lists any outstanding operations that require customer action. Valid values\n\t\t\tare:
\n\n PENDING_ACCEPTANCE
: The operation is waiting for acceptance from\n\t\t\t\t\tthe account that is receiving the domain.
\n PENDING_CUSTOMER_ACTION
: The operation is waiting for customer\n\t\t\t\t\taction, for example, returning an email.
\n PENDING_AUTHORIZATION
: The operation is waiting for the form of\n\t\t\t\t\tauthorization. For more information, see ResendOperationAuthorization.
\n PENDING_PAYMENT_VERIFICATION
: The operation is waiting for the\n\t\t\t\t\tpayment method to validate.
\n PENDING_SUPPORT_CASE
: The operation includes a support case and\n\t\t\t\t\tis waiting for its resolution.
The requested item is not acceptable. For example, for an OperationId it might refer\n\t\t\tto the ID of an operation that is already completed. For a domain name, it might not be\n\t\t\ta valid domain name or belong to the requester account.
" + "smithy.api#documentation": "The requested item is not acceptable. For example, for an OperationId it might refer to the ID of an operation \n\t\t\tthat is already completed. For a domain name, it might not be a valid domain name or belong to the requester account.
" } } }, "traits": { - "smithy.api#documentation": "The requested item is not acceptable. For example, for APIs that accept a domain name,\n\t\t\tthe request might specify a domain name that doesn't belong to the account that\n\t\t\tsubmitted the request. For AcceptDomainTransferFromAnotherAwsAccount
, the\n\t\t\tpassword might be invalid.
The requested item is not acceptable. For example, for APIs that accept a domain name, the request might specify a domain name \n\t\t\tthat doesn't belong to the account that submitted the request. For AcceptDomainTransferFromAnotherAwsAccount
, \n\t\t\tthe password might be invalid.
This operation returns all the domain names registered with Amazon Route 53 for the\n\t\t\tcurrent Amazon Web Services account if no filtering conditions are used.
", + "smithy.api#documentation": "This operation returns all the domain names registered with Amazon Route 53 for the current Amazon Web Services account\n\t\t\t if no filtering conditions are used.
", "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "NextPageMarker", @@ -3547,20 +2696,18 @@ } }, "com.amazonaws.route53domains#ListDomainsAttributeName": { - "type": "enum", - "members": { - "DomainName": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DomainName" - } - }, - "Expiry": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "Expiry" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "DomainName", + "name": "DomainName" + }, + { + "value": "Expiry", + "name": "Expiry" } - } + ] } }, "com.amazonaws.route53domains#ListDomainsRequest": { @@ -3569,25 +2716,25 @@ "FilterConditions": { "target": "com.amazonaws.route53domains#FilterConditions", "traits": { - "smithy.api#documentation": "A complex type that contains information about the filters applied during the\n\t\t\t\tListDomains
request. The filter conditions can include domain name and\n\t\t\tdomain expiration.
A complex type that contains information about the filters applied during the ListDomains
request. The filter conditions can\n\t\t\tinclude domain name and domain expiration.
A complex type that contains information about the requested ordering of domains in\n\t\t\tthe returned list.
" + "smithy.api#documentation": "A complex type that contains information about the requested ordering of domains in the returned list.
" } }, "Marker": { "target": "com.amazonaws.route53domains#PageMarker", "traits": { - "smithy.api#documentation": "For an initial request for a list of domains, omit this element. If the number of\n\t\t\tdomains that are associated with the current Amazon Web Services account is greater than\n\t\t\tthe value that you specified for MaxItems
, you can use Marker
\n\t\t\tto return additional domains. Get the value of NextPageMarker
from the\n\t\t\tprevious response, and submit another request that includes the value of\n\t\t\t\tNextPageMarker
in the Marker
element.
Constraints: The marker must match the value specified in the previous request.
" + "smithy.api#documentation": "For an initial request for a list of domains, omit this element. If the number of domains\n\t\t\tthat are associated with the current Amazon Web Services account is greater than the value that you specified for\n\t\t\tMaxItems
, you can use Marker
to return additional domains. Get the value of\n\t\t\tNextPageMarker
from the previous response, and submit another request that includes the\n\t\t\tvalue of NextPageMarker
in the Marker
element.
Constraints: The marker must match the value specified in the previous request.
" } }, "MaxItems": { "target": "com.amazonaws.route53domains#PageMaxItems", "traits": { - "smithy.api#documentation": "Number of domains to be returned.
\nDefault: 20
" + "smithy.api#documentation": "Number of domains to be returned.
\n\t\tDefault: 20
" } } }, @@ -3601,13 +2748,14 @@ "Domains": { "target": "com.amazonaws.route53domains#DomainSummaryList", "traits": { - "smithy.api#documentation": "A list of domains.
" + "smithy.api#documentation": "A list of domains.
", + "smithy.api#required": {} } }, "NextPageMarker": { "target": "com.amazonaws.route53domains#PageMarker", "traits": { - "smithy.api#documentation": "If there are more domains than you specified for MaxItems
in the request,\n\t\t\tsubmit another request and include the value of NextPageMarker
in the value\n\t\t\tof Marker
.
If there are more domains than you specified for MaxItems
in the request, submit another\n\t\t\trequest and include the value of NextPageMarker
in the value of Marker
.
Returns information about all of the operations that return an operation ID and that\n\t\t\thave ever been performed on domains that were registered by the current account.
\nThis command runs only in the us-east-1 Region.
", + "smithy.api#documentation": "Returns information about all of the operations that return an operation ID and that have ever been \n\t\t\tperformed on domains that were registered by the current account.
\n\t\tThis command runs only in the us-east-1 Region.
", "smithy.api#paginated": { "inputToken": "Marker", "outputToken": "NextPageMarker", @@ -3644,43 +2792,19 @@ "SubmittedSince": { "target": "com.amazonaws.route53domains#Timestamp", "traits": { - "smithy.api#documentation": "An optional parameter that lets you get information about all the operations that you\n\t\t\tsubmitted after a specified date and time. Specify the date and time in Unix time format\n\t\t\tand Coordinated Universal time (UTC).
" + "smithy.api#documentation": "An optional parameter that lets you get information about all the operations that you submitted after a specified date and time. \n\t\t\tSpecify the date and time in Unix time format and Coordinated Universal time (UTC).
" } }, "Marker": { "target": "com.amazonaws.route53domains#PageMarker", "traits": { - "smithy.api#documentation": "For an initial request for a list of operations, omit this element. If the number of\n\t\t\toperations that are not yet complete is greater than the value that you specified for\n\t\t\t\tMaxItems
, you can use Marker
to return additional\n\t\t\toperations. Get the value of NextPageMarker
from the previous response, and\n\t\t\tsubmit another request that includes the value of NextPageMarker
in the\n\t\t\t\tMarker
element.
For an initial request for a list of operations, omit this element. If the number of operations that are\n\t\t\tnot yet complete is greater than the value that you specified for MaxItems
, you can use Marker
\n\t\t\tto return additional operations. Get the value of NextPageMarker
from the previous response,\n\t\t\tand submit another request that includes the value of NextPageMarker
in the Marker
element.
Number of domains to be returned.
\nDefault: 20
" - } - }, - "Status": { - "target": "com.amazonaws.route53domains#OperationStatusList", - "traits": { - "smithy.api#documentation": "The status of the operations.
" - } - }, - "Type": { - "target": "com.amazonaws.route53domains#OperationTypeList", - "traits": { - "smithy.api#documentation": "An arrays of the domains operation types.
" - } - }, - "SortBy": { - "target": "com.amazonaws.route53domains#ListOperationsSortAttributeName", - "traits": { - "smithy.api#documentation": "The sort type for returned values.
" - } - }, - "SortOrder": { - "target": "com.amazonaws.route53domains#SortOrder", - "traits": { - "smithy.api#documentation": "The sort order ofr returned values, either ascending or descending.
" + "smithy.api#documentation": "Number of domains to be returned.
\n\t\tDefault: 20
" } } }, @@ -3694,13 +2818,14 @@ "Operations": { "target": "com.amazonaws.route53domains#OperationSummaryList", "traits": { - "smithy.api#documentation": "Lists summaries of the operations.
" + "smithy.api#documentation": "Lists summaries of the operations.
", + "smithy.api#required": {} } }, "NextPageMarker": { "target": "com.amazonaws.route53domains#PageMarker", "traits": { - "smithy.api#documentation": "If there are more operations than you specified for MaxItems
in the\n\t\t\trequest, submit another request and include the value of NextPageMarker
in\n\t\t\tthe value of Marker
.
If there are more operations than you specified for MaxItems
in the request, submit another\n\t\t\trequest and include the value of NextPageMarker
in the value of Marker
.
The ListOperations response includes the following elements.
" } }, - "com.amazonaws.route53domains#ListOperationsSortAttributeName": { - "type": "enum", - "members": { - "SubmittedDate": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SubmittedDate" - } - } - } - }, "com.amazonaws.route53domains#ListPrices": { "type": "operation", "input": { @@ -3736,7 +2850,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists the following prices for either all the TLDs supported by Route 53, or\n\t\t\tthe specified TLD:
\nRegistration
\nTransfer
\nOwner change
\nDomain renewal
\nDomain restoration
\nLists the following prices for either all the TLDs supported by Route 53, or the specified TLD:
\n\t\tRegistration
\nTransfer
\nOwner change
\nDomain renewal
\nDomain restoration
\nThe TLD for which you want to receive the pricing information. For example.\n\t\t\t\t.net
.
If a Tld
value is not provided, a list of prices for all TLDs supported\n\t\t\tby Route 53 is returned.
The TLD for which you want to receive the pricing information. For example.\n\t\t\t\t.net
.
If a Tld
value is not provided, a list of prices for all TLDs supported by Route 53\n\t\tis returned.
For an initial request for a list of prices, omit this element. If the number of\n\t\t\tprices that are not yet complete is greater than the value that you specified for\n\t\t\t\tMaxItems
, you can use Marker
to return additional prices.\n\t\t\tGet the value of NextPageMarker
from the previous response, and submit\n\t\t\tanother request that includes the value of NextPageMarker
in the\n\t\t\t\tMarker
element.
Used only for all TLDs. If you specify a TLD, don't specify a\n\t\t\tMarker
.
For an initial request for a list of prices, omit this element. \n\t\t\tIf the number of prices that are not yet complete is greater than the value that you specified for MaxItems
, \n\t\t\tyou can use Marker
to return additional prices. Get the value of NextPageMarker
from the previous response, \n\t\t\tand submit another request that includes the value of NextPageMarker
in the Marker
element.
Used only for all TLDs. If you specify a TLD, don't specify a Marker
.
Number of Prices
to be returned.
Used only for all TLDs. If you specify a TLD, don't specify a\n\t\t\tMaxItems
.
Number of Prices
to be returned.
Used only for all TLDs. If you specify a TLD, don't specify a MaxItems
.
A complex type that includes all the pricing information. If you specify a TLD, this\n\t\t\tarray contains only the pricing for that TLD.
" + "smithy.api#documentation": "A complex type that includes all the pricing information. If you specify a TLD, this array\n\t\t\tcontains only the pricing for that TLD.
", + "smithy.api#required": {} } }, "NextPageMarker": { "target": "com.amazonaws.route53domains#PageMarker", "traits": { - "smithy.api#documentation": "If there are more prices than you specified for MaxItems
in the request,\n\t\t\tsubmit another request and include the value of NextPageMarker
in the value\n\t\t\tof Marker
.
Used only for all TLDs. If you specify a TLD, don't specify a\n\t\t\t\tNextPageMarker
.
If there are more prices than you specified for MaxItems
in the request, submit another request and include the value of \n\t\t\tNextPageMarker
in the value of Marker
.
Used only for all TLDs. If you specify a TLD, don't specify a\n\t\t\tNextPageMarker
.
This operation returns all of the tags that are associated with the specified\n\t\t\tdomain.
\nAll tag operations are eventually consistent; subsequent operations might not\n\t\t\timmediately represent all issued operations.
" + "smithy.api#documentation": "This operation returns all of the tags that are associated with the specified domain.
\n\t\tAll tag operations are eventually consistent; subsequent operations might not immediately represent all issued operations.
" } }, "com.amazonaws.route53domains#ListTagsForDomainRequest": { @@ -3829,7 +2944,8 @@ "TagList": { "target": "com.amazonaws.route53domains#TagList", "traits": { - "smithy.api#documentation": "A list of the tags that are associated with the specified domain.
" + "smithy.api#documentation": "A list of the tags that are associated with the specified domain.
", + "smithy.api#required": {} } } }, @@ -3843,310 +2959,217 @@ "Name": { "target": "com.amazonaws.route53domains#HostName", "traits": { - "smithy.api#documentation": "The fully qualified host name of the name server.
\nConstraint: Maximum 255 characters
", + "smithy.api#documentation": "The fully qualified host name of the name server.
\n\t\tConstraint: Maximum 255 characters
", "smithy.api#required": {} } }, - "GlueIps": { - "target": "com.amazonaws.route53domains#GlueIpList", - "traits": { - "smithy.api#documentation": "Glue IP address of a name server entry. Glue IP addresses are required only when the\n\t\t\tname of the name server is a subdomain of the domain. For example, if your domain is\n\t\t\texample.com and the name server for the domain is ns.example.com, you need to specify\n\t\t\tthe IP address for ns.example.com.
\nConstraints: The list can contain only one IPv4 and one IPv6 address.
" - } - } - }, - "traits": { - "smithy.api#documentation": "Name server includes the following elements.
" - } - }, - "com.amazonaws.route53domains#NameserverList": { - "type": "list", - "member": { - "target": "com.amazonaws.route53domains#Nameserver" - } - }, - "com.amazonaws.route53domains#NullableInteger": { - "type": "integer" - }, - "com.amazonaws.route53domains#OperationId": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 0, - "max": 255 - } - } - }, - "com.amazonaws.route53domains#OperationLimitExceeded": { - "type": "structure", - "members": { - "message": { - "target": "com.amazonaws.route53domains#ErrorMessage", - "traits": { - "smithy.api#documentation": "The number of operations or jobs running exceeded the allowed threshold for the\n\t\t\taccount.
" - } - } - }, - "traits": { - "smithy.api#documentation": "The number of operations or jobs running exceeded the allowed threshold for the\n\t\t\taccount.
", - "smithy.api#error": "client", - "smithy.api#httpError": 400 - } - }, - "com.amazonaws.route53domains#OperationStatus": { - "type": "enum", - "members": { - "SUBMITTED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SUBMITTED" - } - }, - "IN_PROGRESS": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "IN_PROGRESS" - } - }, - "ERROR": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ERROR" - } - }, - "SUCCESSFUL": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "SUCCESSFUL" - } - }, - "FAILED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "FAILED" - } - } - } - }, - "com.amazonaws.route53domains#OperationStatusList": { - "type": "list", - "member": { - "target": "com.amazonaws.route53domains#OperationStatus" - }, - "traits": { - "smithy.api#length": { - "min": 0, - "max": 5 - } - } - }, - "com.amazonaws.route53domains#OperationSummary": { - "type": "structure", - "members": { - "OperationId": { - "target": "com.amazonaws.route53domains#OperationId", - "traits": { - "smithy.api#documentation": "Identifier returned to track the requested action.
" - } - }, - "Status": { - "target": "com.amazonaws.route53domains#OperationStatus", - "traits": { - "smithy.api#documentation": "The current status of the requested operation in the system.
" - } - }, - "Type": { - "target": "com.amazonaws.route53domains#OperationType", - "traits": { - "smithy.api#documentation": "Type of the action requested.
" - } - }, - "SubmittedDate": { - "target": "com.amazonaws.route53domains#Timestamp", - "traits": { - "smithy.api#documentation": "The date when the request was submitted.
" - } - }, - "DomainName": { - "target": "com.amazonaws.route53domains#DomainName", - "traits": { - "smithy.api#documentation": "Name of the domain.
" - } - }, - "Message": { - "target": "com.amazonaws.route53domains#ErrorMessage", - "traits": { - "smithy.api#documentation": "Message about the operation.
" - } - }, - "StatusFlag": { - "target": "com.amazonaws.route53domains#StatusFlag", - "traits": { - "smithy.api#documentation": "Automatically checks whether there are no outstanding operations on domains that need\n\t\t\tcustomer attention.
\nValid values are:
\n\n PENDING_ACCEPTANCE
: The operation is waiting for acceptance from\n\t\t\t\t\tthe account that is receiving the domain.
\n PENDING_CUSTOMER_ACTION
: The operation is waiting for customer\n\t\t\t\t\taction, for example, returning an email.
\n PENDING_AUTHORIZATION
: The operation is waiting for the form of\n\t\t\t\t\tauthorization. For more information, see ResendOperationAuthorization.
\n PENDING_PAYMENT_VERIFICATION
: The operation is waiting for the\n\t\t\t\t\tpayment method to validate.
\n PENDING_SUPPORT_CASE
: The operation includes a support case and\n\t\t\t\t\tis waiting for its resolution.
The date when the last change was made in Unix time format and Coordinated Universal\n\t\t\tTime (UTC).
" - } - } - }, - "traits": { - "smithy.api#documentation": "OperationSummary includes the following elements.
" - } - }, - "com.amazonaws.route53domains#OperationSummaryList": { - "type": "list", - "member": { - "target": "com.amazonaws.route53domains#OperationSummary" - } - }, - "com.amazonaws.route53domains#OperationType": { - "type": "enum", - "members": { - "REGISTER_DOMAIN": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "REGISTER_DOMAIN" - } - }, - "DELETE_DOMAIN": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DELETE_DOMAIN" - } - }, - "TRANSFER_IN_DOMAIN": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TRANSFER_IN_DOMAIN" - } - }, - "UPDATE_DOMAIN_CONTACT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UPDATE_DOMAIN_CONTACT" - } - }, - "UPDATE_NAMESERVER": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "UPDATE_NAMESERVER" - } - }, - "CHANGE_PRIVACY_PROTECTION": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CHANGE_PRIVACY_PROTECTION" - } - }, - "DOMAIN_LOCK": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DOMAIN_LOCK" - } - }, - "ENABLE_AUTORENEW": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ENABLE_AUTORENEW" - } - }, - "DISABLE_AUTORENEW": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DISABLE_AUTORENEW" - } - }, - "ADD_DNSSEC": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ADD_DNSSEC" - } - }, - "REMOVE_DNSSEC": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "REMOVE_DNSSEC" - } - }, - "EXPIRE_DOMAIN": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "EXPIRE_DOMAIN" - } - }, - "TRANSFER_OUT_DOMAIN": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "TRANSFER_OUT_DOMAIN" - } - }, - "CHANGE_DOMAIN_OWNER": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "CHANGE_DOMAIN_OWNER" - } - }, - "RENEW_DOMAIN": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "RENEW_DOMAIN" - } - }, - "PUSH_DOMAIN": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PUSH_DOMAIN" - } - }, - "INTERNAL_TRANSFER_OUT_DOMAIN": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "INTERNAL_TRANSFER_OUT_DOMAIN" - } - }, - "INTERNAL_TRANSFER_IN_DOMAIN": { - "target": "smithy.api#Unit", + "GlueIps": { + "target": "com.amazonaws.route53domains#GlueIpList", "traits": { - "smithy.api#enumValue": "INTERNAL_TRANSFER_IN_DOMAIN" + "smithy.api#documentation": "Glue IP address of a name server entry. Glue IP addresses are required only when the name of the name server \n\t\t\tis a subdomain of the domain. For example, if your domain is example.com and the name server for the domain is \n\t\t\tns.example.com, you need to specify the IP address for ns.example.com.
\n\t\tConstraints: The list can contain only one IPv4 and one IPv6 address.
" } } + }, + "traits": { + "smithy.api#documentation": "Nameserver includes the following elements.
" } }, - "com.amazonaws.route53domains#OperationTypeList": { + "com.amazonaws.route53domains#NameserverList": { "type": "list", "member": { - "target": "com.amazonaws.route53domains#OperationType" - }, + "target": "com.amazonaws.route53domains#Nameserver" + } + }, + "com.amazonaws.route53domains#OperationId": { + "type": "string", "traits": { "smithy.api#length": { "min": 0, - "max": 18 + "max": 255 } } }, - "com.amazonaws.route53domains#Operator": { - "type": "enum", + "com.amazonaws.route53domains#OperationLimitExceeded": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.route53domains#ErrorMessage", + "traits": { + "smithy.api#documentation": "The number of operations or jobs running exceeded the allowed threshold for the account.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The number of operations or jobs running exceeded the allowed threshold for the account.
", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.route53domains#OperationStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "SUBMITTED", + "name": "SUBMITTED" + }, + { + "value": "IN_PROGRESS", + "name": "IN_PROGRESS" + }, + { + "value": "ERROR", + "name": "ERROR" + }, + { + "value": "SUCCESSFUL", + "name": "SUCCESSFUL" + }, + { + "value": "FAILED", + "name": "FAILED" + } + ] + } + }, + "com.amazonaws.route53domains#OperationSummary": { + "type": "structure", "members": { - "LE": { - "target": "smithy.api#Unit", + "OperationId": { + "target": "com.amazonaws.route53domains#OperationId", + "traits": { + "smithy.api#documentation": "Identifier returned to track the requested action.
", + "smithy.api#required": {} + } + }, + "Status": { + "target": "com.amazonaws.route53domains#OperationStatus", "traits": { - "smithy.api#enumValue": "LE" + "smithy.api#documentation": "The current status of the requested operation in the system.
", + "smithy.api#required": {} } }, - "GE": { - "target": "smithy.api#Unit", + "Type": { + "target": "com.amazonaws.route53domains#OperationType", "traits": { - "smithy.api#enumValue": "GE" + "smithy.api#documentation": "Type of the action requested.
", + "smithy.api#required": {} } }, - "BEGINS_WITH": { - "target": "smithy.api#Unit", + "SubmittedDate": { + "target": "com.amazonaws.route53domains#Timestamp", "traits": { - "smithy.api#enumValue": "BEGINS_WITH" + "smithy.api#documentation": "The date when the request was submitted.
", + "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#documentation": "OperationSummary includes the following elements.
" + } + }, + "com.amazonaws.route53domains#OperationSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.route53domains#OperationSummary" + } + }, + "com.amazonaws.route53domains#OperationType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "REGISTER_DOMAIN", + "name": "REGISTER_DOMAIN" + }, + { + "value": "DELETE_DOMAIN", + "name": "DELETE_DOMAIN" + }, + { + "value": "TRANSFER_IN_DOMAIN", + "name": "TRANSFER_IN_DOMAIN" + }, + { + "value": "UPDATE_DOMAIN_CONTACT", + "name": "UPDATE_DOMAIN_CONTACT" + }, + { + "value": "UPDATE_NAMESERVER", + "name": "UPDATE_NAMESERVER" + }, + { + "value": "CHANGE_PRIVACY_PROTECTION", + "name": "CHANGE_PRIVACY_PROTECTION" + }, + { + "value": "DOMAIN_LOCK", + "name": "DOMAIN_LOCK" + }, + { + "value": "ENABLE_AUTORENEW", + "name": "ENABLE_AUTORENEW" + }, + { + "value": "DISABLE_AUTORENEW", + "name": "DISABLE_AUTORENEW" + }, + { + "value": "ADD_DNSSEC", + "name": "ADD_DNSSEC" + }, + { + "value": "REMOVE_DNSSEC", + "name": "REMOVE_DNSSEC" + }, + { + "value": "EXPIRE_DOMAIN", + "name": "EXPIRE_DOMAIN" + }, + { + "value": "TRANSFER_OUT_DOMAIN", + "name": "TRANSFER_OUT_DOMAIN" + }, + { + "value": "CHANGE_DOMAIN_OWNER", + "name": "CHANGE_DOMAIN_OWNER" + }, + { + "value": "RENEW_DOMAIN", + "name": "RENEW_DOMAIN" + }, + { + "value": "PUSH_DOMAIN", + "name": "PUSH_DOMAIN" + }, + { + "value": "INTERNAL_TRANSFER_OUT_DOMAIN", + "name": "INTERNAL_TRANSFER_OUT_DOMAIN" + }, + { + "value": "INTERNAL_TRANSFER_IN_DOMAIN", + "name": "INTERNAL_TRANSFER_IN_DOMAIN" + } + ] + } + }, + "com.amazonaws.route53domains#Operator": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "LE", + "name": "LE" + }, + { + "value": "GE", + "name": "GE" + }, + { + "value": "BEGINS_WITH", + "name": "BEGINS_WITH" + } + ] } }, "com.amazonaws.route53domains#PageMarker": { @@ -4195,69 +3218,23 @@ "smithy.api#documentation": "Currency-specific price information.
" } }, - "com.amazonaws.route53domains#PushDomain": { - "type": "operation", - "input": { - "target": "com.amazonaws.route53domains#PushDomainRequest" - }, - "output": { - "target": "smithy.api#Unit" - }, - "errors": [ - { - "target": "com.amazonaws.route53domains#InvalidInput" - }, - { - "target": "com.amazonaws.route53domains#OperationLimitExceeded" - }, - { - "target": "com.amazonaws.route53domains#UnsupportedTLD" - } - ], - "traits": { - "smithy.api#documentation": "Moves a domain from Amazon Web Services to another registrar.
\nSupported actions:
\nChanges the IPS tags of a .uk domain, and pushes it to transit. Transit means\n\t\t\t\t\tthat the domain is ready to be transferred to another registrar.
\nName of the domain.
", - "smithy.api#required": {} - } - }, - "Target": { - "target": "com.amazonaws.route53domains#Label", - "traits": { - "smithy.api#documentation": "New IPS tag for the domain.
", - "smithy.api#required": {} - } - } - } - }, "com.amazonaws.route53domains#ReachabilityStatus": { - "type": "enum", - "members": { - "PENDING": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PENDING" - } - }, - "DONE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DONE" - } - }, - "EXPIRED": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "EXPIRED" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PENDING", + "name": "PENDING" + }, + { + "value": "DONE", + "name": "DONE" + }, + { + "value": "EXPIRED", + "name": "EXPIRED" } - } + ] } }, "com.amazonaws.route53domains#RegisterDomain": { @@ -4289,7 +3266,7 @@ } ], "traits": { - "smithy.api#documentation": "This operation registers a domain. Domains are registered either by Amazon Registrar\n\t\t\t(for .com, .net, and .org domains) or by our registrar associate, Gandi (for all other\n\t\t\tdomains). For some top-level domains (TLDs), this operation requires extra\n\t\t\tparameters.
\nWhen you register a domain, Amazon Route 53 does the following:
\nCreates a Route 53 hosted zone that has the same name as the domain. Route 53\n\t\t\t\t\tassigns four name servers to your hosted zone and automatically updates your\n\t\t\t\t\tdomain registration with the names of these name servers.
\nEnables auto renew, so your domain registration will renew automatically each\n\t\t\t\t\tyear. We'll notify you in advance of the renewal date so you can choose whether\n\t\t\t\t\tto renew the registration.
\nOptionally enables privacy protection, so WHOIS queries return contact\n\t\t\t\t\tinformation either for Amazon Registrar (for .com, .net, and .org domains) or\n\t\t\t\t\tfor our registrar associate, Gandi (for all other TLDs). If you don't enable\n\t\t\t\t\tprivacy protection, WHOIS queries return the information that you entered for\n\t\t\t\t\tthe administrative, registrant, and technical contacts.
\nYou must specify the same privacy setting for the administrative,\n\t\t\t\t\t\tregistrant, and technical contacts.
\nIf registration is successful, returns an operation ID that you can use to\n\t\t\t\t\ttrack the progress and completion of the action. If the request is not completed\n\t\t\t\t\tsuccessfully, the domain registrant is notified by email.
\nCharges your Amazon Web Services account an amount based on the top-level\n\t\t\t\t\tdomain. For more information, see Amazon Route 53 Pricing.
\nThis operation registers a domain. Domains are registered either by Amazon Registrar (for .com, .net, and .org domains) or by \n\t\t\tour registrar associate, Gandi (for all other domains). For some top-level domains (TLDs), this operation requires extra parameters.
\n\t\tWhen you register a domain, Amazon Route 53 does the following:
\n\t\t\tCreates a Route 53 hosted zone that has the same name as the domain. Route 53 assigns four name servers \n\t\t\t\t\tto your hosted zone and automatically updates your domain registration with the names of these name servers.
\nEnables autorenew, so your domain registration will renew automatically each year. We'll notify you \n\t\t\t\t\tin advance of the renewal date so you can choose whether to renew the registration.
\nOptionally enables privacy protection, so WHOIS queries return contact information either for Amazon Registrar \n\t\t\t\t\t(for .com, .net, and .org domains) or for our registrar associate, Gandi (for all other TLDs). If you don't enable privacy \n\t\t\t\t\tprotection, WHOIS queries return the information that you entered for the administrative, registrant, and technical contacts.
\n\t\t\t\t\tYou must specify the same privacy setting for the administrative, registrant, and technical contacts.
\n\t\t\t\t\tIf registration is successful, returns an operation ID that you can use to track the progress and \n\t\t\t\t\tcompletion of the action. If the request is not completed successfully, the domain registrant is notified by email.
\nCharges your Amazon Web Services account an amount based on the top-level domain. For more information, see \n\t\t\t\t\tAmazon Route 53 Pricing.
\nThe domain name that you want to register. The top-level domain (TLD), such as .com,\n\t\t\tmust be a TLD that Route 53 supports. For a list of supported TLDs, see Domains that You Can Register with Amazon Route 53 in the Amazon\n\t\t\t\tRoute 53 Developer Guide.
\nThe domain name can contain only the following characters:
\nLetters a through z. Domain names are not case sensitive.
\nNumbers 0 through 9.
\nHyphen (-). You can't specify a hyphen at the beginning or end of a label.\n\t\t\t\t
\nPeriod (.) to separate the labels in the name, such as the .
in\n\t\t\t\t\t\texample.com
.
Internationalized domain names are not supported for some top-level domains. To\n\t\t\tdetermine whether the TLD that you want to use supports internationalized domain names,\n\t\t\tsee Domains that You Can\n\t\t\t\tRegister with Amazon Route 53. For more information, see Formatting Internationalized Domain Names.
", + "smithy.api#documentation": "The domain name that you want to register. The top-level domain (TLD), such as .com, must be a TLD that Route 53 supports. \n\t\t\tFor a list of supported TLDs, see \n\t\t\tDomains that You Can Register with Amazon Route 53 in the \n\t\t\tAmazon Route 53 Developer Guide.
\n\t\tThe domain name can contain only the following characters:
\n\t\tLetters a through z. Domain names are not case sensitive.
\nNumbers 0 through 9.
\nHyphen (-). You can't specify a hyphen at the beginning or end of a label.
\nPeriod (.) to separate the labels in the name, such as the .
in example.com
.
Internationalized domain names are not supported for some top-level domains. To determine whether the TLD that you want to use supports \n\t\t\tinternationalized domain names, see \n\t\t\tDomains that You Can Register with Amazon Route 53. \n\t\t\tFor more information, see \n\t\t\tFormatting Internationalized Domain Names.\n\t\t
", "smithy.api#required": {} } }, @@ -4311,53 +3288,53 @@ "DurationInYears": { "target": "com.amazonaws.route53domains#DurationInYears", "traits": { - "smithy.api#documentation": "The number of years that you want to register the domain for. Domains are registered\n\t\t\tfor a minimum of one year. The maximum period depends on the top-level domain. For the\n\t\t\trange of valid values for your domain, see Domains that You Can\n\t\t\t\tRegister with Amazon Route 53 in the Amazon Route 53 Developer\n\t\t\t\tGuide.
\nDefault: 1
", + "smithy.api#documentation": "The number of years that you want to register the domain for. Domains are registered for a minimum of one year. \n\t\t\tThe maximum period depends on the top-level domain. For the range of valid values for your domain, see \n\t\t\tDomains that You Can Register with Amazon Route 53 in the \n\t\t\tAmazon Route 53 Developer Guide.
\n\t\tDefault: 1
", "smithy.api#required": {} } }, "AutoRenew": { "target": "com.amazonaws.route53domains#Boolean", "traits": { - "smithy.api#documentation": "Indicates whether the domain will be automatically renewed (true
) or not\n\t\t\t\t(false
). Auto renewal only takes effect after the account is\n\t\t\tcharged.
Default: true
\n
Indicates whether the domain will be automatically renewed (true
) or not (false
). \n\t\t\tAutorenewal only takes effect after the account is charged.
Default: true
\n
Provides detailed contact information. For information about the values that you\n\t\t\tspecify for each element, see ContactDetail.
", + "smithy.api#documentation": "Provides detailed contact information. For information about the values that you specify for each element, see \n\t\t\tContactDetail.
", "smithy.api#required": {} } }, "RegistrantContact": { "target": "com.amazonaws.route53domains#ContactDetail", "traits": { - "smithy.api#documentation": "Provides detailed contact information. For information about the values that you\n\t\t\tspecify for each element, see ContactDetail.
", + "smithy.api#documentation": "Provides detailed contact information. For information about the values that you specify for each element, see \n\t\t\tContactDetail.
", "smithy.api#required": {} } }, "TechContact": { "target": "com.amazonaws.route53domains#ContactDetail", "traits": { - "smithy.api#documentation": "Provides detailed contact information. For information about the values that you\n\t\t\tspecify for each element, see ContactDetail.
", + "smithy.api#documentation": "Provides detailed contact information. For information about the values that you specify for each element, see \n\t\t\tContactDetail.
", "smithy.api#required": {} } }, "PrivacyProtectAdminContact": { "target": "com.amazonaws.route53domains#Boolean", "traits": { - "smithy.api#documentation": "Whether you want to conceal contact information from WHOIS queries. If you specify\n\t\t\t\ttrue
, WHOIS (\"who is\") queries return contact information either for\n\t\t\tAmazon Registrar (for .com, .net, and .org domains) or for our registrar associate,\n\t\t\tGandi (for all other TLDs). If you specify false
, WHOIS queries return the\n\t\t\tinformation that you entered for the admin contact.
You must specify the same privacy setting for the administrative, registrant, and\n\t\t\t\ttechnical contacts.
\nDefault: true
\n
Whether you want to conceal contact information from WHOIS queries. If you specify true
, \n\t\t\tWHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) \n\t\t\tor for our registrar associate, Gandi (for all other TLDs). If you specify false
, \n\t\t\tWHOIS queries return the information that you entered for the admin contact.
You must specify the same privacy setting for the administrative, registrant, and technical contacts.
\n\t\tDefault: true
\n
Whether you want to conceal contact information from WHOIS queries. If you specify\n\t\t\t\ttrue
, WHOIS (\"who is\") queries return contact information either for\n\t\t\tAmazon Registrar (for .com, .net, and .org domains) or for our registrar associate,\n\t\t\tGandi (for all other TLDs). If you specify false
, WHOIS queries return the\n\t\t\tinformation that you entered for the registrant contact (the domain owner).
You must specify the same privacy setting for the administrative, registrant, and\n\t\t\t\ttechnical contacts.
\nDefault: true
\n
Whether you want to conceal contact information from WHOIS queries. If you specify true
, \n\t\t\tWHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) \n\t\t\tor for our registrar associate, Gandi (for all other TLDs). If you specify false
, \n\t\t\tWHOIS queries return the information that you entered for the registrant contact (the domain owner).
You must specify the same privacy setting for the administrative, registrant, and technical contacts.
\n\t\tDefault: true
\n
Whether you want to conceal contact information from WHOIS queries. If you specify\n\t\t\t\ttrue
, WHOIS (\"who is\") queries return contact information either for\n\t\t\tAmazon Registrar (for .com, .net, and .org domains) or for our registrar associate,\n\t\t\tGandi (for all other TLDs). If you specify false
, WHOIS queries return the\n\t\t\tinformation that you entered for the technical contact.
You must specify the same privacy setting for the administrative, registrant, and\n\t\t\t\ttechnical contacts.
\nDefault: true
\n
Whether you want to conceal contact information from WHOIS queries. If you specify true
, \n\t\t\tWHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) \n\t\t\tor for our registrar associate, Gandi (for all other TLDs). If you specify false
, \n\t\t\tWHOIS queries return the information that you entered for the technical contact.
You must specify the same privacy setting for the administrative, registrant, and technical contacts.
\n\t\tDefault: true
\n
Identifier for tracking the progress of the request. To query the operation status,\n\t\t\tuse GetOperationDetail.
" + "smithy.api#documentation": "Identifier for tracking the progress of the request. To query the operation status, use \n\t\t\tGetOperationDetail.
", + "smithy.api#required": {} } } }, @@ -4411,7 +3389,7 @@ } ], "traits": { - "smithy.api#documentation": "Rejects the transfer of a domain from another Amazon Web Services account to the\n\t\t\tcurrent Amazon Web Services account. You initiate a transfer betweenAmazon Web Services accounts using TransferDomainToAnotherAwsAccount.
\nUse either ListOperations or GetOperationDetail to determine whether the operation succeeded. GetOperationDetail provides additional information, for example,\n\t\t\t\tDomain Transfer from Aws Account 111122223333 has been cancelled
.
Rejects the transfer of a domain from another Amazon Web Services account to the current Amazon Web Services account. You initiate a transfer betweenAmazon Web Services accounts using \n\t\t\tTransferDomainToAnotherAwsAccount.\n\t\t
\n\t\t\n\t\tUse either \n\t\t\tListOperations or \n\t\t\tGetOperationDetail \n\t\t\tto determine whether the operation succeeded. \n\t\t\tGetOperationDetail \n\t\t\tprovides additional information, for example, Domain Transfer from Aws Account 111122223333 has been cancelled
. \n\t\t
The name of the domain that was specified when another Amazon Web Services account\n\t\t\tsubmitted a TransferDomainToAnotherAwsAccount request.
", + "smithy.api#documentation": "The name of the domain that was specified when another Amazon Web Services account submitted a \n\t\t\tTransferDomainToAnotherAwsAccount\n\t\t\trequest.\n\t\t
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "The RejectDomainTransferFromAnotherAwsAccount request includes the following\n\t\t\telement.
" + "smithy.api#documentation": "The RejectDomainTransferFromAnotherAwsAccount request includes the following element.
" } }, "com.amazonaws.route53domains#RejectDomainTransferFromAnotherAwsAccountResponse": { @@ -4435,12 +3413,12 @@ "OperationId": { "target": "com.amazonaws.route53domains#OperationId", "traits": { - "smithy.api#documentation": "The identifier that TransferDomainToAnotherAwsAccount
returned to track\n\t\t\tthe progress of the request. Because the transfer request was rejected, the value is no\n\t\t\tlonger valid, and you can't use GetOperationDetail
to query the operation\n\t\t\tstatus.
The identifier that TransferDomainToAnotherAwsAccount
returned to track the progress of the request. \n\t\t\tBecause the transfer request was rejected, the value is no longer valid, and you can't use GetOperationDetail
\n\t\t\tto query the operation status.
The RejectDomainTransferFromAnotherAwsAccount response includes the following\n\t\t\telement.
" + "smithy.api#documentation": "The RejectDomainTransferFromAnotherAwsAccount response includes the following element.
" } }, "com.amazonaws.route53domains#RenewDomain": { @@ -4469,7 +3447,7 @@ } ], "traits": { - "smithy.api#documentation": "This operation renews a domain for the specified number of years. The cost of renewing\n\t\t\tyour domain is billed to your Amazon Web Services account.
\nWe recommend that you renew your domain several weeks before the expiration date. Some\n\t\t\tTLD registries delete domains before the expiration date if you haven't renewed far\n\t\t\tenough in advance. For more information about renewing domain registration, see Renewing\n\t\t\t\tRegistration for a Domain in the Amazon Route 53 Developer\n\t\t\t\tGuide.
" + "smithy.api#documentation": "This operation renews a domain for the specified number of years. The cost of renewing your domain is billed to your Amazon Web Services account.
\n\t\tWe recommend that you renew your domain several weeks before the expiration date. Some TLD registries delete domains before the \n\t\t\texpiration date if you haven't renewed far enough in advance. For more information about renewing domain registration, see \n\t\t\tRenewing Registration for a Domain \n\t\t\tin the Amazon Route 53 Developer Guide.
" } }, "com.amazonaws.route53domains#RenewDomainRequest": { @@ -4485,20 +3463,20 @@ "DurationInYears": { "target": "com.amazonaws.route53domains#DurationInYears", "traits": { - "smithy.api#documentation": "The number of years that you want to renew the domain for. The maximum number of years\n\t\t\tdepends on the top-level domain. For the range of valid values for your domain, see\n\t\t\t\tDomains that You Can\n\t\t\t\tRegister with Amazon Route 53 in the Amazon Route 53 Developer\n\t\t\t\tGuide.
\nDefault: 1
" + "smithy.api#documentation": "The number of years that you want to renew the domain for. The maximum number of years depends on the top-level domain. \n\t\t\tFor the range of valid values for your domain, see \n\t\t\tDomains that You Can Register with Amazon Route 53 in the \n\t\t\tAmazon Route 53 Developer Guide.
\n\t\tDefault: 1
" } }, "CurrentExpiryYear": { "target": "com.amazonaws.route53domains#CurrentExpiryYear", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "The year when the registration for the domain is set to expire. This value must match\n\t\t\tthe current expiration date for the domain.
", + "smithy.api#documentation": "The year when the registration for the domain is set to expire. This value must match the current expiration date for the domain.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "A RenewDomain
request includes the number of years that you want to renew\n\t\t\tfor and the current expiration year.
A RenewDomain
request includes the number of years that you want to renew for and the current expiration year.
Identifier for tracking the progress of the request. To query the operation status,\n\t\t\tuse GetOperationDetail.
" + "smithy.api#documentation": "Identifier for tracking the progress of the request. To query the operation status, use \n\t\t\tGetOperationDetail.
", + "smithy.api#required": {} } } } @@ -4535,7 +3514,7 @@ } ], "traits": { - "smithy.api#documentation": "For operations that require confirmation that the email address for the registrant\n\t\t\tcontact is valid, such as registering a new domain, this operation resends the\n\t\t\tconfirmation email to the current email address for the registrant contact.
" + "smithy.api#documentation": "For operations that require confirmation that the email address for the registrant contact is valid, \n\t\t\tsuch as registering a new domain, this operation resends the confirmation email to the current email address for the registrant contact.
" } }, "com.amazonaws.route53domains#ResendContactReachabilityEmailRequest": { @@ -4544,7 +3523,7 @@ "domainName": { "target": "com.amazonaws.route53domains#DomainName", "traits": { - "smithy.api#documentation": "The name of the domain for which you want Route 53 to resend a confirmation email to\n\t\t\tthe registrant contact.
" + "smithy.api#documentation": "The name of the domain for which you want Route 53 to resend a confirmation email to the registrant contact.
" } } } @@ -4561,42 +3540,13 @@ "emailAddress": { "target": "com.amazonaws.route53domains#Email", "traits": { - "smithy.api#documentation": "The email address for the registrant contact at the time that we sent the verification\n\t\t\temail.
" + "smithy.api#documentation": "The email address for the registrant contact at the time that we sent the verification email.
" } }, "isAlreadyVerified": { "target": "com.amazonaws.route53domains#Boolean", "traits": { - "smithy.api#documentation": "\n True
if the email address for the registrant contact has already been\n\t\t\tverified, and false
otherwise. If the email address has already been\n\t\t\tverified, we don't send another confirmation email.
Resend the form of authorization email for this operation.
" - } - }, - "com.amazonaws.route53domains#ResendOperationAuthorizationRequest": { - "type": "structure", - "members": { - "OperationId": { - "target": "com.amazonaws.route53domains#OperationId", - "traits": { - "smithy.api#documentation": "Operation ID.
", - "smithy.api#required": {} + "smithy.api#documentation": "\n True
if the email address for the registrant contact has already been verified, and false
otherwise. \n\t\t\tIf the email address has already been verified, we don't send another confirmation email.
This operation returns the authorization code for the domain. To transfer a domain to\n\t\t\tanother registrar, you provide this value to the new registrar.
" + "smithy.api#documentation": "This operation returns the AuthCode for the domain. To transfer a domain to another registrar, you provide this value to the new registrar.
" } }, "com.amazonaws.route53domains#RetrieveDomainAuthCodeRequest": { @@ -4633,7 +3583,7 @@ } }, "traits": { - "smithy.api#documentation": "A request for the authorization code for the specified domain. To transfer a domain to\n\t\t\tanother registrar, you provide this value to the new registrar.
" + "smithy.api#documentation": "A request for the authorization code for the specified domain. To transfer a domain to another registrar, you provide \n\t\t\tthis value to the new registrar.
" } }, "com.amazonaws.route53domains#RetrieveDomainAuthCodeResponse": { @@ -4642,7 +3592,8 @@ "AuthCode": { "target": "com.amazonaws.route53domains#DomainAuthCode", "traits": { - "smithy.api#documentation": "The authorization code for the domain.
" + "smithy.api#documentation": "The authorization code for the domain.
", + "smithy.api#required": {} } } }, @@ -4657,9 +3608,6 @@ { "target": "com.amazonaws.route53domains#AcceptDomainTransferFromAnotherAwsAccount" }, - { - "target": "com.amazonaws.route53domains#AssociateDelegationSignerToDomain" - }, { "target": "com.amazonaws.route53domains#CancelDomainTransferToAnotherAwsAccount" }, @@ -4681,9 +3629,6 @@ { "target": "com.amazonaws.route53domains#DisableDomainTransferLock" }, - { - "target": "com.amazonaws.route53domains#DisassociateDelegationSignerFromDomain" - }, { "target": "com.amazonaws.route53domains#EnableDomainAutoRenew" }, @@ -4714,9 +3659,6 @@ { "target": "com.amazonaws.route53domains#ListTagsForDomain" }, - { - "target": "com.amazonaws.route53domains#PushDomain" - }, { "target": "com.amazonaws.route53domains#RegisterDomain" }, @@ -4729,9 +3671,6 @@ { "target": "com.amazonaws.route53domains#ResendContactReachabilityEmail" }, - { - "target": "com.amazonaws.route53domains#ResendOperationAuthorization" - }, { "target": "com.amazonaws.route53domains#RetrieveDomainAuthCode" }, @@ -4769,7 +3708,7 @@ "name": "route53domains" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "Amazon Route 53 API actions let you register domain names and perform related\n\t\t\toperations.
", + "smithy.api#documentation": "Amazon Route 53 API actions let you register domain names and perform related operations.
", "smithy.api#title": "Amazon Route 53 Domains", "smithy.api#xmlNamespace": { "uri": "https://route53domains.amazonaws.com/doc/2014-05-15/" @@ -4779,7 +3718,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -4828,6 +3767,15 @@ "ref": "Endpoint" } ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" } ], "type": "tree", @@ -5090,9 +4038,9 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -5103,9 +4051,9 @@ } }, "params": { - "UseFIPS": true, "Region": "us-east-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -5116,9 +4064,9 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -5129,9 +4077,9 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -5142,9 +4090,9 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", "UseDualStack": false, + "UseFIPS": false, "Endpoint": "https://example.com" } }, @@ -5154,9 +4102,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", "UseDualStack": false, + "UseFIPS": true, "Endpoint": "https://example.com" } }, @@ -5166,9 +4114,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", "UseDualStack": true, + "UseFIPS": false, "Endpoint": "https://example.com" } } @@ -5183,14 +4131,14 @@ "Name": { "target": "com.amazonaws.route53domains#ListDomainsAttributeName", "traits": { - "smithy.api#documentation": "Field to be used for sorting the list of domains. It can be either the name or the\n\t\t\texpiration for a domain. Note that if filterCondition
is used in the same\n\t\t\t\tListDomains\n\t\t\tcall, the field used for sorting has to be the same as the field used for\n\t\t\tfiltering.
Field to be used for sorting the list of domains. It can be either the name or the expiration for a domain. Note that if \n\t\t\tfilterCondition
is used in the same \n\t\t\tListDomains\n\t\t\t call, the field used for sorting has to be the same as the field used for filtering.
The sort order for a list of domains. Either ascending (ASC) or descending\n\t\t\t(DES).
", + "smithy.api#documentation": "The sort order for a list of domains. Either ascending (ASC) or descending (DES).
", "smithy.api#required": {} } } @@ -5200,20 +4148,18 @@ } }, "com.amazonaws.route53domains#SortOrder": { - "type": "enum", - "members": { - "ASC": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "ASC" - } - }, - "DESC": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DESC" + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ASC", + "name": "ASC" + }, + { + "value": "DESC", + "name": "DESC" } - } + ] } }, "com.amazonaws.route53domains#State": { @@ -5225,41 +4171,6 @@ } } }, - "com.amazonaws.route53domains#StatusFlag": { - "type": "enum", - "members": { - "PENDING_ACCEPTANCE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PENDING_ACCEPTANCE" - } - }, - "PENDING_CUSTOMER_ACTION": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PENDING_CUSTOMER_ACTION" - } - }, - "PENDING_AUTHORIZATION": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PENDING_AUTHORIZATION" - } - }, - "PENDING_PAYMENT_VERIFICATION": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PENDING_PAYMENT_VERIFICATION" - } - }, - "PENDING_SUPPORT_CASE": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "PENDING_SUPPORT_CASE" - } - } - } - }, "com.amazonaws.route53domains#String": { "type": "string" }, @@ -5285,13 +4196,13 @@ "Key": { "target": "com.amazonaws.route53domains#TagKey", "traits": { - "smithy.api#documentation": "The key (name) of a tag.
\nValid values: A-Z, a-z, 0-9, space, \".:/=+\\-@\"
\nConstraints: Each key can be 1-128 characters long.
" + "smithy.api#documentation": "The key (name) of a tag.
\n\t\tValid values: A-Z, a-z, 0-9, space, \".:/=+\\-@\"
\n\t\tConstraints: Each key can be 1-128 characters long.
" } }, "Value": { "target": "com.amazonaws.route53domains#TagValue", "traits": { - "smithy.api#documentation": "The value of a tag.
\nValid values: A-Z, a-z, 0-9, space, \".:/=+\\-@\"
\nConstraints: Each value can be 0-256 characters long.
" + "smithy.api#documentation": "The value of a tag.
\n\t\tValid values: A-Z, a-z, 0-9, space, \".:/=+\\-@\"
\n\t\tConstraints: Each value can be 0-256 characters long.
" } } }, @@ -5336,7 +4247,7 @@ "type": "string", "traits": { "smithy.api#length": { - "min": 2, + "min": 1, "max": 255 } } @@ -5370,7 +4281,7 @@ } ], "traits": { - "smithy.api#documentation": "Transfers a domain from another registrar to Amazon Route 53. When the transfer is\n\t\t\tcomplete, the domain is registered either with Amazon Registrar (for .com, .net, and\n\t\t\t.org domains) or with our registrar associate, Gandi (for all other TLDs).
\nFor more information about transferring domains, see the following topics:
\nFor transfer requirements, a detailed procedure, and information about viewing\n\t\t\t\t\tthe status of a domain that you're transferring to Route 53, see Transferring Registration for a Domain to Amazon Route 53 in the\n\t\t\t\t\t\tAmazon Route 53 Developer Guide.
\nFor information about how to transfer a domain from one Amazon Web Services account to another, see TransferDomainToAnotherAwsAccount.
\nFor information about how to transfer a domain to another domain registrar,\n\t\t\t\t\tsee Transferring a Domain from Amazon Route 53 to Another Registrar in\n\t\t\t\t\tthe Amazon Route 53 Developer Guide.
\nIf the registrar for your domain is also the DNS service provider for the domain, we\n\t\t\thighly recommend that you transfer your DNS service to Route 53 or to another DNS\n\t\t\tservice provider before you transfer your registration. Some registrars provide free DNS\n\t\t\tservice when you purchase a domain registration. When you transfer the registration, the\n\t\t\tprevious registrar will not renew your domain registration and could end your DNS\n\t\t\tservice at any time.
\nIf the registrar for your domain is also the DNS service provider for the domain\n\t\t\t\tand you don't transfer DNS service to another provider, your website, email, and the\n\t\t\t\tweb applications associated with the domain might become unavailable.
\nIf the transfer is successful, this method returns an operation ID that you can use to\n\t\t\ttrack the progress and completion of the action. If the transfer doesn't complete\n\t\t\tsuccessfully, the domain registrant will be notified by email.
" + "smithy.api#documentation": "Transfers a domain from another registrar to Amazon Route 53. When the transfer is complete, the domain is registered either with \n\t\t\tAmazon Registrar (for .com, .net, and .org domains) or with our registrar associate, Gandi (for all other TLDs).
\n\t\tFor more information about transferring domains, see the following topics:
\n\t\tFor transfer requirements, a detailed procedure, and information about viewing the status of a domain that you're transferring \n\t\t\t\tto Route 53, see \n\t\t\t\tTransferring Registration for a \n\t\t\t\tDomain to Amazon Route 53 in the Amazon Route 53 Developer Guide.
\nFor information about how to transfer a domain from one Amazon Web Services account to another, see \n\t\t\t\tTransferDomainToAnotherAwsAccount.\n\t\t\t\t
\n\t\t\tFor information about how to transfer a domain to another domain registrar, see \n\t\t\t\tTransferring a Domain from \n\t\t\t\tAmazon Route 53 to Another Registrar in the Amazon Route 53 Developer Guide.
\nIf the registrar for your domain is also the DNS service provider for the domain, we highly recommend that you \n\t\t\ttransfer your DNS service to Route 53 or to another DNS service provider before you transfer your registration. Some registrars \n\t\t\tprovide free DNS service when you purchase a domain registration. When you transfer the registration, the previous registrar \n\t\t\twill not renew your domain registration and could end your DNS service at any time.
\n\t\t\n\t\t\tIf the registrar for your domain is also the DNS service provider for the domain and you don't \n\t\t\t\t\ttransfer DNS service to another provider, your website, email, and the web applications associated with the domain \n\t\t\t\t\tmight become unavailable.
\n\t\t\tIf the transfer is successful, this method returns an operation ID that you can use to track the progress and \n\t\t\tcompletion of the action. If the transfer doesn't complete successfully, the domain registrant will be notified by email.
" } }, "com.amazonaws.route53domains#TransferDomainRequest": { @@ -5379,7 +4290,7 @@ "DomainName": { "target": "com.amazonaws.route53domains#DomainName", "traits": { - "smithy.api#documentation": "The name of the domain that you want to transfer to Route 53. The top-level domain\n\t\t\t(TLD), such as .com, must be a TLD that Route 53 supports. For a list of supported TLDs,\n\t\t\tsee Domains that You Can\n\t\t\t\tRegister with Amazon Route 53 in the Amazon Route 53 Developer\n\t\t\t\tGuide.
\nThe domain name can contain only the following characters:
\nLetters a through z. Domain names are not case sensitive.
\nNumbers 0 through 9.
\nHyphen (-). You can't specify a hyphen at the beginning or end of a label.\n\t\t\t\t
\nPeriod (.) to separate the labels in the name, such as the .
in\n\t\t\t\t\t\texample.com
.
The name of the domain that you want to transfer to Route 53. The top-level domain (TLD), such as .com, must be a TLD that Route 53 supports. \n\t\t\tFor a list of supported TLDs, see \n\t\t\tDomains that You Can Register with Amazon Route 53 in the \n\t\t\tAmazon Route 53 Developer Guide.
\n\t\tThe domain name can contain only the following characters:
\n\t\tLetters a through z. Domain names are not case sensitive.
\nNumbers 0 through 9.
\nHyphen (-). You can't specify a hyphen at the beginning or end of a label.
\nPeriod (.) to separate the labels in the name, such as the .
in example.com
.
The number of years that you want to register the domain for. Domains are registered\n\t\t\tfor a minimum of one year. The maximum period depends on the top-level domain.
\nDefault: 1
", + "smithy.api#documentation": "The number of years that you want to register the domain for. Domains are registered for a minimum of one year. \n\t\t\tThe maximum period depends on the top-level domain.
\n\t\tDefault: 1
", "smithy.api#required": {} } }, @@ -5405,13 +4316,13 @@ "AuthCode": { "target": "com.amazonaws.route53domains#DomainAuthCode", "traits": { - "smithy.api#documentation": "The authorization code for the domain. You get this value from the current\n\t\t\tregistrar.
" + "smithy.api#documentation": "The authorization code for the domain. You get this value from the current registrar.
" } }, "AutoRenew": { "target": "com.amazonaws.route53domains#Boolean", "traits": { - "smithy.api#documentation": "Indicates whether the domain will be automatically renewed (true) or not (false). Auto\n\t\t\trenewal only takes effect after the account is charged.
\nDefault: true
" + "smithy.api#documentation": "Indicates whether the domain will be automatically renewed (true) or not (false). Autorenewal only takes effect \n\t\t\tafter the account is charged.
\n\t\tDefault: true
" } }, "AdminContact": { @@ -5438,19 +4349,19 @@ "PrivacyProtectAdminContact": { "target": "com.amazonaws.route53domains#Boolean", "traits": { - "smithy.api#documentation": "Whether you want to conceal contact information from WHOIS queries. If you specify\n\t\t\t\ttrue
, WHOIS (\"who is\") queries return contact information either for\n\t\t\tAmazon Registrar (for .com, .net, and .org domains) or for our registrar associate,\n\t\t\tGandi (for all other TLDs). If you specify false
, WHOIS queries return the\n\t\t\tinformation that you entered for the admin contact.
You must specify the same privacy setting for the administrative, registrant, and\n\t\t\t\ttechnical contacts.
\nDefault: true
\n
Whether you want to conceal contact information from WHOIS queries. If you specify true
, \n\t\t\tWHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) \n\t\t\tor for our registrar associate, Gandi (for all other TLDs). If you specify false
, \n\t\t\tWHOIS queries return the information that you entered for the admin contact.
You must specify the same privacy setting for the administrative, registrant, and technical contacts.
\n\t\tDefault: true
\n
Whether you want to conceal contact information from WHOIS queries. If you specify\n\t\t\t\ttrue
, WHOIS (\"who is\") queries return contact information either for\n\t\t\tAmazon Registrar (for .com, .net, and .org domains) or for our registrar associate,\n\t\t\tGandi (for all other TLDs). If you specify false
, WHOIS queries return the\n\t\t\tinformation that you entered for the registrant contact (domain owner).
You must specify the same privacy setting for the administrative, registrant, and\n\t\t\t\ttechnical contacts.
\nDefault: true
\n
Whether you want to conceal contact information from WHOIS queries. If you specify true
, \n\t\t\tWHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) \n\t\t\tor for our registrar associate, Gandi (for all other TLDs). If you specify false
, \n\t\t\tWHOIS queries return the information that you entered for the registrant contact (domain owner).
You must specify the same privacy setting for the administrative, registrant, and technical contacts.
\n\t\tDefault: true
\n
Whether you want to conceal contact information from WHOIS queries. If you specify\n\t\t\t\ttrue
, WHOIS (\"who is\") queries return contact information either for\n\t\t\tAmazon Registrar (for .com, .net, and .org domains) or for our registrar associate,\n\t\t\tGandi (for all other TLDs). If you specify false
, WHOIS queries return the\n\t\t\tinformation that you entered for the technical contact.
You must specify the same privacy setting for the administrative, registrant, and\n\t\t\t\ttechnical contacts.
\nDefault: true
\n
Whether you want to conceal contact information from WHOIS queries. If you specify true
, \n\t\t\tWHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) \n\t\t\tor for our registrar associate, Gandi (for all other TLDs). If you specify false
, \n\t\t\tWHOIS queries return the information that you entered for the technical contact.
You must specify the same privacy setting for the administrative, registrant, and technical contacts.
\n\t\tDefault: true
\n
Identifier for tracking the progress of the request. To query the operation status,\n\t\t\tuse GetOperationDetail.
" + "smithy.api#documentation": "Identifier for tracking the progress of the request. To query the operation status, use \n\t\t\tGetOperationDetail.
", + "smithy.api#required": {} } } }, @@ -5495,7 +4407,7 @@ } ], "traits": { - "smithy.api#documentation": "Transfers a domain from the current Amazon Web Services account to another Amazon Web Services account. Note the following:
\nThe Amazon Web Services account that you're transferring the domain to must\n\t\t\t\t\taccept the transfer. If the other account doesn't accept the transfer within 3\n\t\t\t\t\tdays, we cancel the transfer. See AcceptDomainTransferFromAnotherAwsAccount.
\nYou can cancel the transfer before the other account accepts it. See CancelDomainTransferToAnotherAwsAccount.
\nThe other account can reject the transfer. See RejectDomainTransferFromAnotherAwsAccount.
\nWhen you transfer a domain from one Amazon Web Services account to another, Route\n\t\t\t\t53 doesn't transfer the hosted zone that is associated with the domain. DNS\n\t\t\t\tresolution isn't affected if the domain and the hosted zone are owned by separate\n\t\t\t\taccounts, so transferring the hosted zone is optional. For information about\n\t\t\t\ttransferring the hosted zone to another Amazon Web Services account, see Migrating a\n\t\t\t\t\tHosted Zone to a Different Amazon Web Services Account in the\n\t\t\t\t\tAmazon Route 53 Developer Guide.
\nUse either ListOperations or GetOperationDetail to determine whether the operation succeeded. GetOperationDetail provides additional information, for example,\n\t\t\t\tDomain Transfer from Aws Account 111122223333 has been cancelled
.
Transfers a domain from the current Amazon Web Services account to another Amazon Web Services account. Note the following:
\n\t\tThe Amazon Web Services account that you're transferring the domain to must accept the transfer. If the other account \n\t\t\t\tdoesn't accept the transfer within 3 days, we cancel the transfer. See \n\t\t\t\tAcceptDomainTransferFromAnotherAwsAccount.\n\t\t\t\t
\n\t\t\tYou can cancel the transfer before the other account accepts it. See \n\t\t\t\tCancelDomainTransferToAnotherAwsAccount.\n\t\t\t\t
\n\t\t\tThe other account can reject the transfer. See \n\t\t\t\t\tRejectDomainTransferFromAnotherAwsAccount.\n\t\t\t\t
\n\t\t\tWhen you transfer a domain from one Amazon Web Services account to another, Route 53 doesn't transfer the hosted zone that is associated \n\t\t\t\twith the domain. DNS resolution isn't affected if the domain and the hosted zone are owned by separate accounts, \n\t\t\t\tso transferring the hosted zone is optional. For information about transferring the hosted zone to another Amazon Web Services account, see \n\t\t\t\tMigrating a Hosted Zone to a \n\t\t\t\t\tDifferent Amazon Web Services Account in the Amazon Route 53 Developer Guide.
\n\t\tUse either \n\t\t\tListOperations or \n\t\t\tGetOperationDetail \n\t\t\tto determine whether the operation succeeded. \n\t\t\tGetOperationDetail \n\t\t\tprovides additional information, for example, Domain Transfer from Aws Account 111122223333 has been cancelled
. \n\t\t
The account ID of the Amazon Web Services account that you want to transfer the domain\n\t\t\tto, for example, 111122223333
.
The account ID of the Amazon Web Services account that you want to transfer the domain to, for example, 111122223333
.
Identifier for tracking the progress of the request. To query the operation status,\n\t\t\tuse GetOperationDetail.
" + "smithy.api#documentation": "Identifier for tracking the progress of the request. To query the operation status, use \n\t\t\tGetOperationDetail.
" } }, "Password": { "target": "com.amazonaws.route53domains#String", "traits": { - "smithy.api#documentation": "To finish transferring a domain to another Amazon Web Services account, the account\n\t\t\tthat the domain is being transferred to must submit an AcceptDomainTransferFromAnotherAwsAccount request. The request must include\n\t\t\tthe value of the Password
element that was returned in the\n\t\t\t\tTransferDomainToAnotherAwsAccount
response.
To finish transferring a domain to another Amazon Web Services account, the account that the domain is being transferred to must submit an \n\t\t\tAcceptDomainTransferFromAnotherAwsAccount\n\t\t\trequest. The request must include the value of the Password
element that was returned in the \n\t\t\tTransferDomainToAnotherAwsAccount
response.
The TransferDomainToAnotherAwsAccount
response includes the following\n\t\t\telements.
The TransferDomainToAnotherAwsAccount
response includes the following elements.
Whether the domain name can be transferred to Route 53.
\nYou can transfer only domains that have a value of TRANSFERABLE
or\n\t\t\t\t\tTransferable
.
Valid values:
\nThe domain name can be transferred to Route 53.
\nThe domain name can't be transferred to Route 53.
\nReserved for future use.
\nThe domain already exists in the current Amazon Web Services account.
\nthe domain exists in another Amazon Web Services account.
\nPremium domain transfer is not supported.
\nWhether the domain name can be transferred to Route 53.
\n\t\tYou can transfer only domains that have a value of TRANSFERABLE
for Transferable
.
Valid values:
\n\t\tThe domain name can be transferred to Route 53.
\nThe domain name can't be transferred to Route 53.
\nReserved for future use.
\nThis operation updates the contact information for a particular domain. You must\n\t\t\tspecify information for at least one contact: registrant, administrator, or\n\t\t\ttechnical.
\nIf the update is successful, this method returns an operation ID that you can use to\n\t\t\ttrack the progress and completion of the operation. If the request is not completed\n\t\t\tsuccessfully, the domain registrant will be notified by email.
" + "smithy.api#documentation": "This operation updates the contact information for a particular domain. You must specify information for at least one contact: \n\t\t\tregistrant, administrator, or technical.
\n\t\tIf the update is successful, this method returns an operation ID that you can use to track the progress and completion of the action. \n\t\t\tIf the request is not completed successfully, the domain registrant will be notified by email.
" } }, "com.amazonaws.route53domains#UpdateDomainContactPrivacy": { @@ -5655,7 +4543,7 @@ } ], "traits": { - "smithy.api#documentation": "This operation updates the specified domain contact's privacy setting. When privacy\n\t\t\tprotection is enabled, contact information such as email address is replaced either with\n\t\t\tcontact information for Amazon Registrar (for .com, .net, and .org domains) or with\n\t\t\tcontact information for our registrar associate, Gandi.
\nYou must specify the same privacy setting for the administrative, registrant, and\n\t\t\t\ttechnical contacts.
\nThis operation affects only the contact information for the specified contact type\n\t\t\t(administrative, registrant, or technical). If the request succeeds, Amazon Route 53\n\t\t\treturns an operation ID that you can use with GetOperationDetail to track the progress and completion of the action. If\n\t\t\tthe request doesn't complete successfully, the domain registrant will be notified by\n\t\t\temail.
\nBy disabling the privacy service via API, you consent to the publication of the\n\t\t\t\tcontact information provided for this domain via the public WHOIS database. You\n\t\t\t\tcertify that you are the registrant of this domain name and have the authority to\n\t\t\t\tmake this decision. You may withdraw your consent at any time by enabling privacy\n\t\t\t\tprotection using either UpdateDomainContactPrivacy
or the Route 53\n\t\t\t\tconsole. Enabling privacy protection removes the contact information provided for\n\t\t\t\tthis domain from the WHOIS database. For more information on our privacy practices,\n\t\t\t\tsee https://aws.amazon.com/privacy/.
This operation updates the specified domain contact's privacy setting. When privacy protection is enabled, \n\t\t\tcontact information such as email address is replaced either with contact information for Amazon Registrar (for .com, .net, and .org \n\t\t\tdomains) or with contact information for our registrar associate, Gandi.
\n\t\tYou must specify the same privacy setting for the administrative, registrant, and technical contacts.
\n\t\tThis operation affects only the contact information for the specified contact type (administrative, registrant, or technical). \n\t\t\tIf the request succeeds, Amazon Route 53 returns an operation ID that you can use with \n\t\t\tGetOperationDetail\n\t\t\tto track the progress and completion of the action. If the request doesn't complete successfully, the domain registrant will be notified by email.
\n\t\tBy disabling the privacy service via API, you consent to the publication of the contact information provided for this domain \n\t\t\tvia the public WHOIS database. You certify that you are the registrant of this domain name and have the authority to make this decision. \n\t\t\tYou may withdraw your consent at any time by enabling privacy protection using either UpdateDomainContactPrivacy
or the \n\t\t\tRoute 53 console. Enabling privacy protection removes the contact information provided for this domain from the WHOIS database. \n\t\t\tFor more information on our privacy practices, see \n\t\t\thttps://aws.amazon.com/privacy/.
Whether you want to conceal contact information from WHOIS queries. If you specify\n\t\t\t\ttrue
, WHOIS (\"who is\") queries return contact information either for\n\t\t\tAmazon Registrar (for .com, .net, and .org domains) or for our registrar associate,\n\t\t\tGandi (for all other TLDs). If you specify false
, WHOIS queries return the\n\t\t\tinformation that you entered for the admin contact.
You must specify the same privacy setting for the administrative, registrant, and\n\t\t\t\ttechnical contacts.
\nWhether you want to conceal contact information from WHOIS queries. If you specify true
, \n\t\t\tWHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) \n\t\t\tor for our registrar associate, Gandi (for all other TLDs). If you specify false
, \n\t\t\tWHOIS queries return the information that you entered for the admin contact.
You must specify the same privacy setting for the administrative, registrant, and technical contacts.
\n\t\tWhether you want to conceal contact information from WHOIS queries. If you specify\n\t\t\t\ttrue
, WHOIS (\"who is\") queries return contact information either for\n\t\t\tAmazon Registrar (for .com, .net, and .org domains) or for our registrar associate,\n\t\t\tGandi (for all other TLDs). If you specify false
, WHOIS queries return the\n\t\t\tinformation that you entered for the registrant contact (domain owner).
You must specify the same privacy setting for the administrative, registrant, and\n\t\t\t\ttechnical contacts.
\nWhether you want to conceal contact information from WHOIS queries. If you specify true
, \n\t\t\tWHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) \n\t\t\tor for our registrar associate, Gandi (for all other TLDs). If you specify false
, \n\t\t\tWHOIS queries return the information that you entered for the registrant contact (domain owner).
You must specify the same privacy setting for the administrative, registrant, and technical contacts.
\n\t\tWhether you want to conceal contact information from WHOIS queries. If you specify\n\t\t\t\ttrue
, WHOIS (\"who is\") queries return contact information either for\n\t\t\tAmazon Registrar (for .com, .net, and .org domains) or for our registrar associate,\n\t\t\tGandi (for all other TLDs). If you specify false
, WHOIS queries return the\n\t\t\tinformation that you entered for the technical contact.
You must specify the same privacy setting for the administrative, registrant, and\n\t\t\t\ttechnical contacts.
\nWhether you want to conceal contact information from WHOIS queries. If you specify true
, \n\t\t\tWHOIS (\"who is\") queries return contact information either for Amazon Registrar (for .com, .net, and .org domains) \n\t\t\tor for our registrar associate, Gandi (for all other TLDs). If you specify false
, \n\t\t\tWHOIS queries return the information that you entered for the technical contact.
You must specify the same privacy setting for the administrative, registrant, and technical contacts.
\n\t\tIdentifier for tracking the progress of the request. To use this ID to query the\n\t\t\toperation status, use GetOperationDetail.
" + "smithy.api#documentation": "Identifier for tracking the progress of the request. To use this ID to query the operation status, use GetOperationDetail.
", + "smithy.api#required": {} } } }, @@ -5732,12 +4621,6 @@ "traits": { "smithy.api#documentation": "Provides detailed contact information.
" } - }, - "Consent": { - "target": "com.amazonaws.route53domains#Consent", - "traits": { - "smithy.api#documentation": "Customer's consent for the owner change request.
" - } } }, "traits": { @@ -5750,7 +4633,8 @@ "OperationId": { "target": "com.amazonaws.route53domains#OperationId", "traits": { - "smithy.api#documentation": "Identifier for tracking the progress of the request. To query the operation status,\n\t\t\tuse GetOperationDetail.
" + "smithy.api#documentation": "Identifier for tracking the progress of the request. To query the operation status, use \n\t\t\tGetOperationDetail.
", + "smithy.api#required": {} } } }, @@ -5784,7 +4668,7 @@ } ], "traits": { - "smithy.api#documentation": "This operation replaces the current set of name servers for the domain with the\n\t\t\tspecified set of name servers. If you use Amazon Route 53 as your DNS service, specify\n\t\t\tthe four name servers in the delegation set for the hosted zone for the domain.
\nIf successful, this operation returns an operation ID that you can use to track the\n\t\t\tprogress and completion of the action. If the request is not completed successfully, the\n\t\t\tdomain registrant will be notified by email.
" + "smithy.api#documentation": "This operation replaces the current set of name servers for the domain with the specified set of name servers. \n\t\t\tIf you use Amazon Route 53 as your DNS service, specify the four name servers in the delegation set for the hosted zone for the domain.
\n\t\tIf successful, this operation returns an operation ID that you can use to track the progress and completion of the action. \n\t\t\tIf the request is not completed successfully, the domain registrant will be notified by email.
" } }, "com.amazonaws.route53domains#UpdateDomainNameserversRequest": { @@ -5813,7 +4697,7 @@ } }, "traits": { - "smithy.api#documentation": "Replaces the current set of name servers for the domain with the specified set of name\n\t\t\tservers. If you use Amazon Route 53 as your DNS service, specify the four name servers\n\t\t\tin the delegation set for the hosted zone for the domain.
\nIf successful, this operation returns an operation ID that you can use to track the\n\t\t\tprogress and completion of the action. If the request is not completed successfully, the\n\t\t\tdomain registrant will be notified by email.
" + "smithy.api#documentation": "Replaces the current set of name servers for the domain with the specified set of name servers. \n\t\t\tIf you use Amazon Route 53 as your DNS service, specify the four name servers in the delegation set for the hosted zone for the domain.
\n\t\tIf successful, this operation returns an operation ID that you can use to track the progress and \n\t\t\tcompletion of the action. If the request is not completed successfully, the domain registrant will be notified by email.
" } }, "com.amazonaws.route53domains#UpdateDomainNameserversResponse": { @@ -5822,7 +4706,8 @@ "OperationId": { "target": "com.amazonaws.route53domains#OperationId", "traits": { - "smithy.api#documentation": "Identifier for tracking the progress of the request. To query the operation status,\n\t\t\tuse GetOperationDetail.
" + "smithy.api#documentation": "Identifier for tracking the progress of the request. To query the operation status, use \n\t\t\tGetOperationDetail.
", + "smithy.api#required": {} } } }, @@ -5850,7 +4735,7 @@ } ], "traits": { - "smithy.api#documentation": "This operation adds or updates tags for a specified domain.
\nAll tag operations are eventually consistent; subsequent operations might not\n\t\t\timmediately represent all issued operations.
" + "smithy.api#documentation": "This operation adds or updates tags for a specified domain.
\n\t\tAll tag operations are eventually consistent; subsequent operations might not immediately represent all issued operations.
" } }, "com.amazonaws.route53domains#UpdateTagsForDomainRequest": { @@ -5866,7 +4751,7 @@ "TagsToUpdate": { "target": "com.amazonaws.route53domains#TagList", "traits": { - "smithy.api#documentation": "A list of the tag keys and values that you want to add or update. If you specify a key\n\t\t\tthat already exists, the corresponding value will be replaced.
" + "smithy.api#documentation": "A list of the tag keys and values that you want to add or update. If you specify a key \n\t\t\tthat already exists, the corresponding value will be replaced.
" } } }, @@ -5928,25 +4813,25 @@ "Start": { "target": "com.amazonaws.route53domains#Timestamp", "traits": { - "smithy.api#documentation": "The beginning date and time for the time period for which you want a list of billing\n\t\t\trecords. Specify the date and time in Unix time format and Coordinated Universal time\n\t\t\t(UTC).
" + "smithy.api#documentation": "The beginning date and time for the time period for which you want a list of billing records. Specify the date and time \n\t\t\tin Unix time format and Coordinated Universal time (UTC).
" } }, "End": { "target": "com.amazonaws.route53domains#Timestamp", "traits": { - "smithy.api#documentation": "The end date and time for the time period for which you want a list of billing\n\t\t\trecords. Specify the date and time in Unix time format and Coordinated Universal time\n\t\t\t(UTC).
" + "smithy.api#documentation": "The end date and time for the time period for which you want a list of billing records. Specify the date and time \n\t\t\tin Unix time format and Coordinated Universal time (UTC).
" } }, "Marker": { "target": "com.amazonaws.route53domains#PageMarker", "traits": { - "smithy.api#documentation": "For an initial request for a list of billing records, omit this element. If the number\n\t\t\tof billing records that are associated with the current Amazon Web Services account\n\t\t\tduring the specified period is greater than the value that you specified for\n\t\t\t\tMaxItems
, you can use Marker
to return additional billing\n\t\t\trecords. Get the value of NextPageMarker
from the previous response, and\n\t\t\tsubmit another request that includes the value of NextPageMarker
in the\n\t\t\t\tMarker
element.
Constraints: The marker must match the value of NextPageMarker
that was\n\t\t\treturned in the previous response.
For an initial request for a list of billing records, omit this element. If the number of billing records \n\t\t\tthat are associated with the current Amazon Web Services account during the specified period is greater than the value that \n\t\t\tyou specified for MaxItems
, you can use Marker
to return additional billing records. \n\t\t\tGet the value of NextPageMarker
from the previous response, and submit another request that includes \n\t\t\tthe value of NextPageMarker
in the Marker
element.\n\t\t
Constraints: The marker must match the value of NextPageMarker
that was returned in the previous response.
The number of billing records to be returned.
\nDefault: 20
" + "smithy.api#documentation": "The number of billing records to be returned.
\n\t\tDefault: 20
" } } }, @@ -5960,7 +4845,7 @@ "NextPageMarker": { "target": "com.amazonaws.route53domains#PageMarker", "traits": { - "smithy.api#documentation": "If there are more billing records than you specified for MaxItems
in the\n\t\t\trequest, submit another request and include the value of NextPageMarker
in\n\t\t\tthe value of Marker
.
If there are more billing records than you specified for MaxItems
in the request, submit another \n\t\t\trequest and include the value of NextPageMarker
in the value of Marker
.
Adds or overwrites one or more tags for the specified SageMaker resource. You can add\n tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform\n jobs, models, labeling jobs, work teams, endpoint configurations, and\n endpoints.
\nEach tag consists of a key and an optional value. Tag keys must be unique per\n resource. For more information about tags, see For more information, see Amazon Web Services Tagging Strategies.
\nTags that you add to a hyperparameter tuning job by calling this API are also\n added to any training jobs that the hyperparameter tuning job launches after you\n call this API, but not to training jobs that the hyperparameter tuning job launched\n before you called this API. To make sure that the tags associated with a\n hyperparameter tuning job are also added to all training jobs that the\n hyperparameter tuning job launches, add the tags when you first create the tuning\n job by specifying them in the Tags
parameter of CreateHyperParameterTuningJob\n
Tags that you add to a SageMaker Studio Domain or User Profile by calling this API\n are also added to any Apps that the Domain or User Profile launches after you call\n this API, but not to Apps that the Domain or User Profile launched before you called\n this API. To make sure that the tags associated with a Domain or User Profile are\n also added to all Apps that the Domain or User Profile launches, add the tags when\n you first create the Domain or User Profile by specifying them in the\n Tags
parameter of CreateDomain or CreateUserProfile.
Adds or overwrites one or more tags for the specified SageMaker resource. You can add\n tags to notebook instances, training jobs, hyperparameter tuning jobs, batch transform\n jobs, models, labeling jobs, work teams, endpoint configurations, and\n endpoints.
\nEach tag consists of a key and an optional value. Tag keys must be unique per\n resource. For more information about tags, see For more information, see Amazon Web Services Tagging Strategies.
\nTags that you add to a hyperparameter tuning job by calling this API are also\n added to any training jobs that the hyperparameter tuning job launches after you\n call this API, but not to training jobs that the hyperparameter tuning job launched\n before you called this API. To make sure that the tags associated with a\n hyperparameter tuning job are also added to all training jobs that the\n hyperparameter tuning job launches, add the tags when you first create the tuning\n job by specifying them in the Tags
parameter of CreateHyperParameterTuningJob\n
Tags that you add to a SageMaker Studio Domain or User Profile by calling this API\n are also added to any Apps that the Domain or User Profile launches after you call\n this API, but not to Apps that the Domain or User Profile launched before you called\n this API. To make sure that the tags associated with a Domain or User Profile are\n also added to all Apps that the Domain or User Profile launches, add the tags when\n you first create the Domain or User Profile by specifying them in the\n Tags
parameter of CreateDomain or CreateUserProfile.
The registry path of the Docker image\n that contains the training algorithm.\n For information about docker registry paths for SageMaker built-in algorithms, see Docker Registry\n Paths and Example Code in the Amazon SageMaker developer guide.\n SageMaker supports both registry/repository[:tag]
and\n registry/repository[@digest]
image path formats. For more information\n about using your custom training container, see Using Your Own Algorithms with\n Amazon SageMaker.
You must specify either the algorithm name to the AlgorithmName
\n parameter or the image URI of the algorithm container to the\n TrainingImage
parameter.
For more information, see the note in the AlgorithmName
parameter\n description.
The registry path of the Docker image\n that contains the training algorithm.\n For information about docker registry paths for SageMaker built-in algorithms, see Docker Registry\n Paths and Example Code in the Amazon SageMaker developer guide.\n SageMaker supports both registry/repository[:tag]
and\n registry/repository[@digest]
image path formats. For more information\n about using your custom training container, see Using Your Own Algorithms with\n Amazon SageMaker.
You must specify either the algorithm name to the AlgorithmName
\n parameter or the image URI of the algorithm container to the\n TrainingImage
parameter.
For more information, see the note in the AlgorithmName
parameter\n description.
The name of the algorithm resource to use for the training job. This must be an\n algorithm resource that you created or subscribe to on Amazon Web Services\n Marketplace.
\nYou must specify either the algorithm name to the AlgorithmName
\n parameter or the image URI of the algorithm container to the\n TrainingImage
parameter.
Note that the AlgorithmName
parameter is mutually exclusive with the\n TrainingImage
parameter. If you specify a value for the\n AlgorithmName
parameter, you can't specify a value for\n TrainingImage
, and vice versa.
If you specify values for both parameters, the training job might break; if you\n don't specify any value for both parameters, the training job might raise a\n null
error.
The name of the algorithm resource to use for the training job. This must be an\n algorithm resource that you created or subscribe to on Amazon Web Services\n Marketplace.
\nYou must specify either the algorithm name to the AlgorithmName
\n parameter or the image URI of the algorithm container to the\n TrainingImage
parameter.
Note that the AlgorithmName
parameter is mutually exclusive with the\n TrainingImage
parameter. If you specify a value for the\n AlgorithmName
parameter, you can't specify a value for\n TrainingImage
, and vice versa.
If you specify values for both parameters, the training job might break; if you\n don't specify any value for both parameters, the training job might raise a\n null
error.
To generate and save time-series metrics during training, set to true
.\n The default is false
and time-series metrics aren't generated except in the\n following cases:
You use one of the SageMaker built-in algorithms
\nYou use one of the following Prebuilt SageMaker Docker Images:
\nTensorflow (version >= 1.15)
\nMXNet (version >= 1.6)
\nPyTorch (version >= 1.3)
\nYou specify at least one MetricDefinition\n
\nTo generate and save time-series metrics during training, set to true
.\n The default is false
and time-series metrics aren't generated except in the\n following cases:
You use one of the SageMaker built-in algorithms
\nYou use one of the following Prebuilt SageMaker Docker Images:
\nTensorflow (version >= 1.15)
\nMXNet (version >= 1.6)
\nPyTorch (version >= 1.3)
\nYou specify at least one MetricDefinition\n
\nSpecifies the training algorithm to use in a CreateTrainingJob\n request.
\nFor more information about algorithms provided by SageMaker, see Algorithms. For\n information about using your own algorithms, see Using Your Own Algorithms with Amazon\n SageMaker.
" + "smithy.api#documentation": "Specifies the training algorithm to use in a CreateTrainingJob\n request.
\nFor more information about algorithms provided by SageMaker, see Algorithms. For\n information about using your own algorithms, see Using Your Own Algorithms with Amazon\n SageMaker.
" } }, "com.amazonaws.sagemaker#AlgorithmStatus": { @@ -674,7 +674,7 @@ } }, "traits": { - "smithy.api#documentation": "Defines a training job and a batch transform job that SageMaker runs to validate your\n algorithm.
\nThe data provided in the validation profile is made available to your buyers on\n Amazon Web Services Marketplace.
" + "smithy.api#documentation": "Defines a training job and a batch transform job that SageMaker runs to validate your\n algorithm.
\nThe data provided in the validation profile is made available to your buyers on\n Amazon Web Services Marketplace.
" } }, "com.amazonaws.sagemaker#AlgorithmValidationProfiles": { @@ -717,7 +717,7 @@ "AnnotationConsolidationLambdaArn": { "target": "com.amazonaws.sagemaker#LambdaFunctionArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of a Lambda function implements the logic for annotation consolidation and to process output data.
\nThis parameter is required for all labeling jobs. For built-in task types, use one\n of the following Amazon SageMaker Ground Truth Lambda function ARNs for\n AnnotationConsolidationLambdaArn
. For custom labeling workflows, see\n Post-annotation Lambda.
\n Bounding box - Finds the most similar boxes from\n different workers based on the Jaccard index of the boxes.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBox
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-BoundingBox
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-BoundingBox
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-BoundingBox
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-BoundingBox
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-BoundingBox
\n
\n Image classification - Uses a variant of the\n Expectation Maximization approach to estimate the true class of an image based on\n annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClass
\n
\n Multi-label image classification - Uses a variant of\n the Expectation Maximization approach to estimate the true classes of an image based on\n annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClassMultiLabel
\n
\n Semantic segmentation - Treats each pixel in an image\n as a multi-class classification and treats pixel annotations from workers as \"votes\" for\n the correct label.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-SemanticSegmentation
\n
\n Text classification - Uses a variant of the\n Expectation Maximization approach to estimate the true class of text based on\n annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClass
\n
\n Multi-label text classification - Uses a variant of\n the Expectation Maximization approach to estimate the true classes of text based on\n annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClassMultiLabel
\n
\n Named entity recognition - Groups similar selections\n and calculates aggregate boundaries, resolving to most-assigned label.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-NamedEntityRecognition
\n
\n Video Classification - Use this task type when you need workers to classify videos using\n predefined labels that you specify. Workers are shown videos and are asked to choose one\n label for each video.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoMultiClass
\n
\n Video Frame Object Detection - Use this task type to\n have workers identify and locate objects in a sequence of video frames (images extracted\n from a video) using bounding boxes. For example, you can use this task to ask workers to\n identify and localize various objects in a series of video frames, such as cars, bikes,\n and pedestrians.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoObjectDetection
\n
\n Video Frame Object Tracking - Use this task type to\n have workers track the movement of objects in a sequence of video frames (images\n extracted from a video) using bounding boxes. For example, you can use this task to ask\n workers to track the movement of objects, such as cars, bikes, and pedestrians.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoObjectTracking
\n
\n 3D Point Cloud Object Detection - Use this task type\n when you want workers to classify objects in a 3D point cloud by drawing 3D cuboids\n around objects. For example, you can use this task type to ask workers to identify\n different types of objects in a point cloud, such as cars, bikes, and\n pedestrians.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectDetection
\n
\n 3D Point Cloud Object Tracking - Use this task type\n when you want workers to draw 3D cuboids around objects that appear in a sequence of 3D\n point cloud frames. For example, you can use this task type to ask workers to track the\n movement of vehicles across multiple point cloud frames.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectTracking
\n
\n 3D Point Cloud Semantic Segmentation - Use this task\n type when you want workers to create a point-level semantic segmentation masks by\n painting objects in a 3D point cloud using different colors where each color is assigned\n to one of the classes you specify.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudSemanticSegmentation
\n
\n Use the following ARNs for Label Verification and Adjustment Jobs\n
\nUse label verification and adjustment jobs to review and adjust labels. To learn more,\n see Verify and Adjust Labels .
\n\n Semantic Segmentation Adjustment - Treats each pixel\n in an image as a multi-class classification and treats pixel adjusted annotations from\n workers as \"votes\" for the correct label.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentSemanticSegmentation
\n
\n Semantic Segmentation Verification - Uses a variant\n of the Expectation Maximization approach to estimate the true class of verification\n judgment for semantic segmentation labels based on annotations from individual\n workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationSemanticSegmentation
\n
\n Bounding Box Adjustment - Finds the most similar\n boxes from different workers based on the Jaccard index of the adjusted\n annotations.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentBoundingBox
\n
\n Bounding Box Verification - Uses a variant of the\n Expectation Maximization approach to estimate the true class of verification judgement\n for bounding box labels based on annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationBoundingBox
\n
\n Video Frame Object Detection Adjustment - \n Use this task type when you want workers to adjust bounding boxes that workers have added \n to video frames to classify and localize objects in a sequence of video frames.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentVideoObjectDetection
\n
\n Video Frame Object Tracking Adjustment - \n Use this task type when you want workers to adjust bounding boxes that workers have added \n to video frames to track object movement across a sequence of video frames.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentVideoObjectTracking
\n
\n 3D Point Cloud Object Detection Adjustment - Use this\n task type when you want workers to adjust 3D cuboids around objects in a 3D point cloud.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n 3D Point Cloud Object Tracking Adjustment - Use this\n task type when you want workers to adjust 3D cuboids around objects that appear in a\n sequence of 3D point cloud frames.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n 3D Point Cloud Semantic Segmentation Adjustment - Use this task\n type when you want workers to adjust a point-level semantic segmentation masks using a paint tool.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
The Amazon Resource Name (ARN) of a Lambda function implements the logic for annotation consolidation and to process output data.
\nThis parameter is required for all labeling jobs. For built-in task types, use one\n of the following Amazon SageMaker Ground Truth Lambda function ARNs for\n AnnotationConsolidationLambdaArn
. For custom labeling workflows, see\n Post-annotation Lambda.
\n Bounding box - Finds the most similar boxes from\n different workers based on the Jaccard index of the boxes.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-BoundingBox
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-BoundingBox
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-BoundingBox
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-BoundingBox
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-BoundingBox
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-BoundingBox
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-BoundingBox
\n
\n Image classification - Uses a variant of the\n Expectation Maximization approach to estimate the true class of an image based on\n annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClass
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClass
\n
\n Multi-label image classification - Uses a variant of\n the Expectation Maximization approach to estimate the true classes of an image based on\n annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-ImageMultiClassMultiLabel
\n
\n Semantic segmentation - Treats each pixel in an image\n as a multi-class classification and treats pixel annotations from workers as \"votes\" for\n the correct label.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-SemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-SemanticSegmentation
\n
\n Text classification - Uses a variant of the\n Expectation Maximization approach to estimate the true class of text based on\n annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClass
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClass
\n
\n Multi-label text classification - Uses a variant of\n the Expectation Maximization approach to estimate the true classes of text based on\n annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-TextMultiClassMultiLabel
\n
\n Named entity recognition - Groups similar selections\n and calculates aggregate boundaries, resolving to the most-assigned label.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-NamedEntityRecognition
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-NamedEntityRecognition
\n
\n Video Classification - Use this task type when you need workers to classify videos using\n predefined labels that you specify. Workers are shown videos and are asked to choose one\n label for each video.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoMultiClass
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoMultiClass
\n
\n Video Frame Object Detection - Use this task type to\n have workers identify and locate objects in a sequence of video frames (images extracted\n from a video) using bounding boxes. For example, you can use this task to ask workers to\n identify and localize various objects in a series of video frames, such as cars, bikes,\n and pedestrians.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoObjectDetection
\n
\n Video Frame Object Tracking - Use this task type to\n have workers track the movement of objects in a sequence of video frames (images\n extracted from a video) using bounding boxes. For example, you can use this task to ask\n workers to track the movement of objects, such as cars, bikes, and pedestrians.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VideoObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-VideoObjectTracking
\n
\n 3D Point Cloud Object Detection - Use this task type\n when you want workers to classify objects in a 3D point cloud by drawing 3D cuboids\n around objects. For example, you can use this task type to ask workers to identify\n different types of objects in a point cloud, such as cars, bikes, and\n pedestrians.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectDetection
\n
\n 3D Point Cloud Object Tracking - Use this task type\n when you want workers to draw 3D cuboids around objects that appear in a sequence of 3D\n point cloud frames. For example, you can use this task type to ask workers to track the\n movement of vehicles across multiple point cloud frames.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudObjectTracking
\n
\n 3D Point Cloud Semantic Segmentation - Use this task\n type when you want workers to create point-level semantic segmentation masks by\n painting objects in a 3D point cloud using different colors, where each color is assigned\n to one of the classes you specify.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-3DPointCloudSemanticSegmentation
\n
\n Use the following ARNs for Label Verification and Adjustment Jobs\n
\nUse label verification and adjustment jobs to review and adjust labels. To learn more,\n see Verify and Adjust Labels .
\n \n\n Semantic Segmentation Adjustment - Treats each pixel\n in an image as a multi-class classification and treats adjusted pixel annotations from\n workers as \"votes\" for the correct label.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentSemanticSegmentation
\n
\n Semantic Segmentation Verification - Uses a variant\n of the Expectation Maximization approach to estimate the true class of verification\n judgment for semantic segmentation labels based on annotations from individual\n workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationSemanticSegmentation
\n
\n Bounding Box Adjustment - Finds the most similar\n boxes from different workers based on the Jaccard index of the adjusted\n annotations.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentBoundingBox
\n
\n Bounding Box Verification - Uses a variant of the\n Expectation Maximization approach to estimate the true class of verification judgment\n for bounding box labels based on annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-VerificationBoundingBox
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-VerificationBoundingBox
\n
\n Video Frame Object Detection Adjustment - \n Use this task type when you want workers to adjust bounding boxes that workers have added \n to video frames to classify and localize objects in a sequence of video frames.
\n\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentVideoObjectDetection
\n
\n Video Frame Object Tracking Adjustment - \n Use this task type when you want workers to adjust bounding boxes that workers have added \n to video frames to track object movement across a sequence of video frames.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-AdjustmentVideoObjectTracking
\n
\n 3D Point Cloud Object Detection Adjustment - Use this\n task type when you want workers to adjust 3D cuboids around objects in a 3D point cloud.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectDetection
\n
\n 3D Point Cloud Object Tracking Adjustment - Use this\n task type when you want workers to adjust 3D cuboids around objects that appear in a\n sequence of 3D point cloud frames.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudObjectTracking
\n
\n 3D Point Cloud Semantic Segmentation Adjustment - Use this task\n type when you want workers to adjust point-level semantic segmentation masks using a paint tool.
\n\n arn:aws:lambda:us-east-1:432418664414:function:ACS-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-east-1:432418664414:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:ACS-Adjustment3DPointCloudSemanticSegmentation
\n
The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that SageMaker\n uses to encrypt the asynchronous inference output in Amazon S3.
\n " + "smithy.api#documentation": "The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that SageMaker\n uses to encrypt the asynchronous inference output in Amazon S3.
\n " } }, "S3OutputPath": { @@ -2394,7 +2394,7 @@ "MetricName": { "target": "com.amazonaws.sagemaker#AutoMLMetricEnum", "traits": { - "smithy.api#documentation": "The name of the objective metric used to measure the predictive quality of a machine\n learning system. This metric is optimized during training to provide the best estimate for\n model parameter values from data.
\nHere are the options:
\nThe ratio of the number of correctly classified items to the total number of\n (correctly and incorrectly) classified items. It is used for both binary and\n multiclass classification. Accuracy measures how close the predicted class values\n are to the actual values. Values for accuracy metrics vary between zero (0) and\n one (1). A value of 1 indicates perfect accuracy, and 0 indicates perfect\n inaccuracy.
\nThe area under the curve (AUC) metric is used to compare and evaluate binary\n classification by algorithms that return probabilities, such as logistic\n regression. To map the probabilities into classifications, these are compared\n against a threshold value.
\nThe relevant curve is the receiver operating characteristic curve (ROC curve).\n The ROC curve plots the true positive rate (TPR) of predictions (or recall)\n against the false positive rate (FPR) as a function of the threshold value, above\n which a prediction is considered positive. Increasing the threshold results in\n fewer false positives, but more false negatives.
\nAUC is the area under this ROC curve. Therefore, AUC provides an aggregated\n measure of the model performance across all possible classification thresholds.\n AUC scores vary between 0 and 1. A score of 1 indicates perfect accuracy, and a\n score of one half (0.5) indicates that the prediction is not better than a random\n classifier.
\n\n BalancedAccuracy
is a metric that measures the ratio of accurate\n predictions to all predictions. This ratio is calculated after normalizing true\n positives (TP) and true negatives (TN) by the total number of positive (P) and\n negative (N) values. It is used in both binary and multiclass classification and\n is defined as follows: 0.5*((TP/P)+(TN/N)), with values ranging from 0 to 1.\n BalancedAccuracy
gives a better measure of accuracy when the\n number of positives or negatives differ greatly from each other in an imbalanced\n dataset. For example, when only 1% of email is spam.
The F1
score is the harmonic mean of the precision and recall,\n defined as follows: F1 = 2 * (precision * recall) / (precision + recall). It is\n used for binary classification into classes traditionally referred to as positive\n and negative. Predictions are said to be true when they match their actual\n (correct) class, and false when they do not.
Precision is the ratio of the true positive predictions to all positive\n predictions, and it includes the false positives in a dataset. Precision measures\n the quality of the prediction when it predicts the positive class.
\nRecall (or sensitivity) is the ratio of the true positive predictions to all\n actual positive instances. Recall measures how completely a model predicts the\n actual class members in a dataset.
\nF1 scores vary between 0 and 1. A score of 1 indicates the best possible\n performance, and 0 indicates the worst.
\nThe F1macro
score applies F1 scoring to multiclass classification\n problems. It does this by calculating the precision and recall, and then taking\n their harmonic mean to calculate the F1 score for each class. Lastly, the F1macro\n averages the individual scores to obtain the F1macro
score.\n F1macro
scores vary between 0 and 1. A score of 1 indicates the\n best possible performance, and 0 indicates the worst.
The mean absolute error (MAE) is a measure of how different the predicted and\n actual values are, when they're averaged over all values. MAE is commonly used in\n regression analysis to understand model prediction error. If there is linear\n regression, MAE represents the average distance from a predicted line to the\n actual value. MAE is defined as the sum of absolute errors divided by the number\n of observations. Values range from 0 to infinity, with smaller numbers indicating\n a better model fit to the data.
\nThe mean squared error (MSE) is the average of the squared differences between\n the predicted and actual values. It is used for regression. MSE values are always\n positive. The better a model is at predicting the actual values, the smaller the\n MSE value is
\nPrecision measures how well an algorithm predicts the true positives (TP) out\n of all of the positives that it identifies. It is defined as follows: Precision =\n TP/(TP+FP), with values ranging from zero (0) to one (1), and is used in binary\n classification. Precision is an important metric when the cost of a false positive\n is high. For example, the cost of a false positive is very high if an airplane\n safety system is falsely deemed safe to fly. A false positive (FP) reflects a\n positive prediction that is actually negative in the data.
\nThe precision macro computes precision for multiclass classification problems.\n It does this by calculating precision for each class and averaging scores to\n obtain precision for several classes. PrecisionMacro
scores range\n from zero (0) to one (1). Higher scores reflect the model's ability to predict\n true positives (TP) out of all of the positives that it identifies, averaged\n across multiple classes.
R2, also known as the coefficient of determination, is used in regression to\n quantify how much a model can explain the variance of a dependent variable. Values\n range from one (1) to negative one (-1). Higher numbers indicate a higher fraction\n of explained variability. R2
values close to zero (0) indicate that\n very little of the dependent variable can be explained by the model. Negative\n values indicate a poor fit and that the model is outperformed by a constant\n function. For linear regression, this is a horizontal line.
Recall measures how well an algorithm correctly predicts all of the true\n positives (TP) in a dataset. A true positive is a positive prediction that is also\n an actual positive value in the data. Recall is defined as follows: Recall =\n TP/(TP+FN), with values ranging from 0 to 1. Higher scores reflect a better\n ability of the model to predict true positives (TP) in the data, and is used in\n binary classification.
\nRecall is important when testing for cancer because it's used to find all of\n the true positives. A false positive (FP) reflects a positive prediction that is\n actually negative in the data. It is often insufficient to measure only recall,\n because predicting every output as a true positive will yield a perfect recall\n score.
\nThe RecallMacro computes recall for multiclass classification problems by\n calculating recall for each class and averaging scores to obtain recall for\n several classes. RecallMacro scores range from 0 to 1. Higher scores reflect the\n model's ability to predict true positives (TP) in a dataset. Whereas, a true\n positive reflects a positive prediction that is also an actual positive value in\n the data. It is often insufficient to measure only recall, because predicting\n every output as a true positive will yield a perfect recall score.
\nRoot mean squared error (RMSE) measures the square root of the squared\n difference between predicted and actual values, and it's averaged over all values.\n It is used in regression analysis to understand model prediction error. It's an\n important metric to indicate the presence of large model errors and outliers.\n Values range from zero (0) to infinity, with smaller numbers indicating a better\n model fit to the data. RMSE is dependent on scale, and should not be used to\n compare datasets of different sizes.
\nIf you do not specify a metric explicitly, the default behavior is to automatically\n use:
\n\n MSE
: for regression.
\n F1
: for binary classification
\n Accuracy
: for multiclass classification.
The name of the objective metric used to measure the predictive quality of a machine\n learning system. This metric is optimized during training to provide the best estimate for\n model parameter values from data.
\nHere are the options:
\n\n MSE
: The mean squared error (MSE) is the average of the squared\n differences between the predicted and actual values. It is used for regression. MSE\n values are always positive: the better a model is at predicting the actual values,\n the smaller the MSE value is. When the data contains outliers, they tend to dominate\n the MSE, which might cause subpar prediction performance.
\n Accuracy
: The ratio of the number of correctly classified items to\n the total number of (correctly and incorrectly) classified items. It is used for\n binary and multiclass classification. It measures how close the predicted class\n values are to the actual values. Accuracy values vary between zero and one: one\n indicates perfect accuracy and zero indicates perfect inaccuracy.
\n F1
: The F1 score is the harmonic mean of the precision and recall. It\n is used for binary classification into classes traditionally referred to as positive\n and negative. Predictions are said to be true when they match their actual (correct)\n class and false when they do not. Precision is the ratio of the true positive\n predictions to all positive predictions (including the false positives) in a data set\n and measures the quality of the prediction when it predicts the positive class.\n Recall (or sensitivity) is the ratio of the true positive predictions to all actual\n positive instances and measures how completely a model predicts the actual class\n members in a data set. The standard F1 score weighs precision and recall equally. But\n which metric is paramount typically depends on specific aspects of a problem. F1\n scores vary between zero and one: one indicates the best possible performance and\n zero the worst.
\n AUC
: The area under the curve (AUC) metric is used to compare and\n evaluate binary classification by algorithms such as logistic regression that return\n probabilities. A threshold is needed to map the probabilities into classifications.\n The relevant curve is the receiver operating characteristic curve that plots the true\n positive rate (TPR) of predictions (or recall) against the false positive rate (FPR)\n as a function of the threshold value, above which a prediction is considered\n positive. Increasing the threshold results in fewer false positives but more false\n negatives. AUC is the area under this receiver operating characteristic curve and so\n provides an aggregated measure of the model performance across all possible\n classification thresholds. The AUC score can also be interpreted as the probability\n that a randomly selected positive data point is more likely to be predicted positive\n than a randomly selected negative example. AUC scores vary between zero and one: a\n score of one indicates perfect accuracy and a score of one half indicates that the\n prediction is not better than a random classifier. Values under one half predict less\n accurately than a random predictor. But such consistently bad predictors can simply\n be inverted to obtain better than random predictors.
\n F1macro
: The F1macro score applies F1 scoring to multiclass\n classification. In this context, you have multiple classes to predict. You just\n calculate the precision and recall for each class as you did for the positive class\n in binary classification. Then, use these values to calculate the F1 score for each\n class and average them to obtain the F1macro score. F1macro scores vary between zero\n and one: one indicates the best possible performance and zero the worst.
If you do not specify a metric explicitly, the default behavior is to automatically\n use:
\n\n MSE
: for regression.
\n F1
: for binary classification
\n Accuracy
: for multiclass classification.
The Amazon Resource Name (ARN) of an Amazon Web Services Key Management Service key that SageMaker uses to encrypt data on\n the storage volume attached to the ML compute instance that hosts the batch transform job.
\nThe KmsKeyId can be any of the following formats:
\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN:\n arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
Alias name: alias/ExampleAlias
\n
Alias name ARN:\n arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias
\n
The Amazon Resource Name (ARN) of an Amazon Web Services Key Management Service key that SageMaker uses to encrypt data on\n the storage volume attached to the ML compute instance that hosts the batch transform job.
\nThe KmsKeyId can be any of the following formats:
\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN:\n arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
Alias name: alias/ExampleAlias
\n
Alias name ARN:\n arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias
\n
Specifies the endpoint capacity type.
\n\n INSTANCE_COUNT
: The endpoint activates based on the number of\n instances.
\n CAPACITY_PERCENT
: The endpoint activates based on the specified\n percentage of capacity.
Specifies the endpoint capacity type.
\n\n INSTANCE_COUNT
: The endpoint activates based on the number of\n instances.
\n CAPACITY_PERCENT
: The endpoint activates based on the specified\n percentage of capacity.
Specify RecordIO as the value when input data is in raw format but the training\n algorithm requires the RecordIO format. In this case, SageMaker wraps each individual S3\n object in a RecordIO record. If the input data is already in RecordIO format, you don't\n need to set this attribute. For more information, see Create\n a Dataset Using RecordIO.
\nIn File mode, leave this field unset or set it to None.
" + "smithy.api#documentation": "\nSpecify RecordIO as the value when input data is in raw format but the training\n algorithm requires the RecordIO format. In this case, SageMaker wraps each individual S3\n object in a RecordIO record. If the input data is already in RecordIO format, you don't\n need to set this attribute. For more information, see Create\n a Dataset Using RecordIO.
\nIn File mode, leave this field unset or set it to None.
" } }, "InputMode": { "target": "com.amazonaws.sagemaker#TrainingInputMode", "traits": { - "smithy.api#documentation": "(Optional) The input mode to use for the data channel in a training job. If you don't\n set a value for InputMode
, SageMaker uses the value set for\n TrainingInputMode
. Use this parameter to override the\n TrainingInputMode
setting in a AlgorithmSpecification\n request when you have a channel that needs a different input mode from the training\n job's general setting. To download the data from Amazon Simple Storage Service (Amazon S3) to the provisioned ML\n storage volume, and mount the directory to a Docker volume, use File
input\n mode. To stream data directly from Amazon S3 to the container, choose Pipe
input\n mode.
To use a model for incremental training, choose File
input model.
(Optional) The input mode to use for the data channel in a training job. If you don't\n set a value for InputMode
, SageMaker uses the value set for\n TrainingInputMode
. Use this parameter to override the\n TrainingInputMode
setting in a AlgorithmSpecification\n request when you have a channel that needs a different input mode from the training\n job's general setting. To download the data from Amazon Simple Storage Service (Amazon S3) to the provisioned ML\n storage volume, and mount the directory to a Docker volume, use File
input\n mode. To stream data directly from Amazon S3 to the container, choose Pipe
input\n mode.
To use a model for incremental training, choose File
input model.
A configuration for a shuffle option for input data in a channel. If you use\n S3Prefix
for S3DataType
, this shuffles the results of the\n S3 key prefix matches. If you use ManifestFile
, the order of the S3 object\n references in the ManifestFile
is shuffled. If you use\n AugmentedManifestFile
, the order of the JSON lines in the\n AugmentedManifestFile
is shuffled. The shuffling order is determined\n using the Seed
value.
For Pipe input mode, shuffling is done at the start of every epoch. With large\n datasets this ensures that the order of the training data is different for each epoch,\n it helps reduce bias and possible overfitting. In a multi-node training job when\n ShuffleConfig is combined with S3DataDistributionType
of\n ShardedByS3Key
, the data is shuffled across nodes so that the content\n sent to a particular node on the first epoch might be sent to a different node on the\n second epoch.
A configuration for a shuffle option for input data in a channel. If you use\n S3Prefix
for S3DataType
, this shuffles the results of the\n S3 key prefix matches. If you use ManifestFile
, the order of the S3 object\n references in the ManifestFile
is shuffled. If you use\n AugmentedManifestFile
, the order of the JSON lines in the\n AugmentedManifestFile
is shuffled. The shuffling order is determined\n using the Seed
value.
For Pipe input mode, shuffling is done at the start of every epoch. With large\n datasets this ensures that the order of the training data is different for each epoch,\n it helps reduce bias and possible overfitting. In a multi-node training job when\n ShuffleConfig is combined with S3DataDistributionType
of\n ShardedByS3Key
, the data is shuffled across nodes so that the content\n sent to a particular node on the first epoch might be sent to a different node on the\n second epoch.
The allowed input mode, either FILE or PIPE.
\nIn FILE mode, Amazon SageMaker copies the data from the input source onto the local\n Amazon Elastic Block Store (Amazon EBS) volumes before starting your training algorithm.\n This is the most commonly used input mode.
\nIn PIPE mode, Amazon SageMaker streams input data from the source directly to your\n algorithm without using the EBS volume.
", + "smithy.api#documentation": "The allowed input mode, either FILE or PIPE.
\nIn FILE mode, Amazon SageMaker copies the data from the input source onto the local\n Amazon Elastic Block Store (Amazon EBS) volumes before starting your training algorithm.\n This is the most commonly used input mode.
\nIn PIPE mode, Amazon SageMaker streams input data from the source directly to your\n algorithm without using the EBS volume.
", "smithy.api#required": {} } } @@ -4142,7 +4094,7 @@ "EnableExplanations": { "target": "com.amazonaws.sagemaker#ClarifyEnableExplanations", "traits": { - "smithy.api#documentation": "A JMESPath boolean expression used to filter which records to explain. Explanations\n are activated by default. See \n EnableExplanations
\n for additional information.
A JMESPath boolean expression used to filter which records to explain. Explanations\n are activated by default. See \n EnableExplanations
\n for additional information.
A zero-based index used to extract a probability value (score) or list from model\n container output in CSV format. If this value is not provided, the entire model\n container output will be treated as a probability value (score) or list.
\n\n Example for a single class model: If the model\n container output consists of a string-formatted prediction label followed by its\n probability: '1,0.6'
, set ProbabilityIndex
to 1
\n to select the probability value 0.6
.
\n Example for a multiclass model: If the model\n container output consists of a string-formatted prediction label followed by its\n probability: '\"[\\'cat\\',\\'dog\\',\\'fish\\']\",\"[0.1,0.6,0.3]\"'
, set\n ProbabilityIndex
to 1
to select the probability values\n [0.1,0.6,0.3]
.
A zero-based index used to extract a probability value (score) or list from model\n container output in CSV format. If this value is not provided, the entire model\n container output will be treated as a probability value (score) or list.
\n\n Example for a single class model: If the model\n container output consists of a string-formatted prediction label followed by its\n probability: '1,0.6'
, set ProbabilityIndex
to 1
\n to select the probability value 0.6
.
\n Example for a multiclass model: If the model\n container output consists of a string-formatted prediction label followed by its\n probability: '\"[\\'cat\\',\\'dog\\',\\'fish\\']\",\"[0.1,0.6,0.3]\"'
, set\n ProbabilityIndex
to 1
to select the probability values\n [0.1,0.6,0.3]
.
A zero-based index used to extract a label header or list of label headers from model\n container output in CSV format.
\n\n Example for a multiclass model: If the model\n container output consists of label headers followed by probabilities:\n '\"[\\'cat\\',\\'dog\\',\\'fish\\']\",\"[0.1,0.6,0.3]\"'
, set\n LabelIndex
to 0
to select the label headers\n ['cat','dog','fish']
.
A zero-based index used to extract a label header or list of label headers from model\n container output in CSV format.
\n\n Example for a multiclass model: If the model\n container output consists of label headers followed by probabilities:\n '\"[\\'cat\\',\\'dog\\',\\'fish\\']\",\"[0.1,0.6,0.3]\"'
, set\n LabelIndex
to 0
to select the label headers\n ['cat','dog','fish']
.
A JMESPath expression used to extract the probability (or score) from the model\n container output if the model container is in JSON Lines format.
\n\n Example: If the model container output of a single\n request is '{\"predicted_label\":1,\"probability\":0.6}'
, then set\n ProbabilityAttribute
to 'probability'
.
A JMESPath expression used to extract the probability (or score) from the model\n container output if the model container is in JSON Lines format.
\n\n Example: If the model container output of a single\n request is '{\"predicted_label\":1,\"probability\":0.6}'
, then set\n ProbabilityAttribute
to 'probability'
.
A JMESPath expression used to locate the list of label headers in the model container\n output.
\n\n Example: If the model container output of a batch\n request is '{\"labels\":[\"cat\",\"dog\",\"fish\"],\"probability\":[0.6,0.3,0.1]}'
,\n then set LabelAttribute
to 'labels'
to extract the list of\n label headers [\"cat\",\"dog\",\"fish\"]
\n
A JMESPath expression used to locate the list of label headers in the model container\n output.
\n\n Example: If the model container output of a batch\n request is '{\"labels\":[\"cat\",\"dog\",\"fish\"],\"probability\":[0.6,0.3,0.1]}'
,\n then set LabelAttribute
to 'labels'
to extract the list of\n label headers [\"cat\",\"dog\",\"fish\"]
\n
The configuration for the SHAP\n baseline (also called the background or reference dataset) of the Kernel\n SHAP algorithm.
\nThe number of records in the baseline data determines the size of the\n synthetic dataset, which has an impact on latency of explainability\n requests. For more information, see the Synthetic\n data of Configure and create an endpoint.
\n\n ShapBaseline
and ShapBaselineUri
are mutually\n exclusive parameters. One or the either is required to configure a SHAP\n baseline.
The configuration for the SHAP\n baseline (also called the background or reference dataset) of the Kernal\n SHAP algorithm.
\nThe number of records in the baseline data determines the size of the\n synthetic dataset, which has an impact on latency of explainability\n requests. For more information, see the Synthetic\n data of Configure and create an endpoint.
\n\n ShapBaseline
and ShapBaselineUri
are mutually\n exclusive parameters. One or the either is required to configure a SHAP\n baseline.
The number of samples to be used for analysis by the Kernal SHAP algorithm.
\nThe number of samples determines the size of the synthetic dataset, which has an\n impact on latency of explainability requests. For more information, see the\n Synthetic data of Configure and create an endpoint.
\nThe number of samples to be used for analysis by the Kernal SHAP algorithm.
\nThe number of samples determines the size of the synthetic dataset, which has an\n impact on latency of explainability requests. For more information, see the\n Synthetic data of Configure and create an endpoint.
\nSpecifies the language of the text features in ISO 639-1 or\n ISO 639-3 code of a\n supported language.
\nFor a mix of multiple languages, use code 'xx'
.
Specifies the language of the text features in ISO 639-1 or\n ISO 639-3 code of a\n supported language.
\nFor a mix of multiple languages, use code 'xx'
.
This parameter is ignored for models that contain only a\n PrimaryContainer
.
When a ContainerDefinition
is part of an inference pipeline, the value of\n the parameter uniquely identifies the container for the purposes of logging and metrics.\n For information, see Use Logs and Metrics\n to Monitor an Inference Pipeline. If you don't specify a value for this\n parameter for a ContainerDefinition
that is part of an inference pipeline,\n a unique name is automatically assigned based on the position of the\n ContainerDefinition
in the pipeline. If you specify a value for the\n ContainerHostName
for any ContainerDefinition
that is part\n of an inference pipeline, you must specify a value for the\n ContainerHostName
parameter of every ContainerDefinition
\n in that pipeline.
This parameter is ignored for models that contain only a\n PrimaryContainer
.
When a ContainerDefinition
is part of an inference pipeline, the value of\n the parameter uniquely identifies the container for the purposes of logging and metrics.\n For information, see Use Logs and Metrics\n to Monitor an Inference Pipeline. If you don't specify a value for this\n parameter for a ContainerDefinition
that is part of an inference pipeline,\n a unique name is automatically assigned based on the position of the\n ContainerDefinition
in the pipeline. If you specify a value for the\n ContainerHostName
for any ContainerDefinition
that is part\n of an inference pipeline, you must specify a value for the\n ContainerHostName
parameter of every ContainerDefinition
\n in that pipeline.
The path where inference code is stored. This can be either in Amazon EC2 Container Registry or in a\n Docker registry that is accessible from the same VPC that you configure for your\n endpoint. If you are using your own custom algorithm instead of an algorithm provided by\n SageMaker, the inference code must meet SageMaker requirements. SageMaker supports both\n registry/repository[:tag]
and registry/repository[@digest]
\n image path formats. For more information, see Using Your Own Algorithms with Amazon\n SageMaker\n
The path where inference code is stored. This can be either in Amazon EC2 Container Registry or in a\n Docker registry that is accessible from the same VPC that you configure for your\n endpoint. If you are using your own custom algorithm instead of an algorithm provided by\n SageMaker, the inference code must meet SageMaker requirements. SageMaker supports both\n registry/repository[:tag]
and registry/repository[@digest]
\n image path formats. For more information, see Using Your Own Algorithms with Amazon\n SageMaker\n
Specifies whether the model container is in Amazon ECR or a private Docker registry\n accessible from your Amazon Virtual Private Cloud (VPC). For information about storing containers in a\n private Docker registry, see Use a\n Private Docker Registry for Real-Time Inference Containers\n
" + "smithy.api#documentation": "Specifies whether the model container is in Amazon ECR or a private Docker registry\n accessible from your Amazon Virtual Private Cloud (VPC). For information about storing containers in a\n private Docker registry, see Use a\n Private Docker Registry for Real-Time Inference Containers\n
" } }, "Mode": { @@ -5475,7 +5427,7 @@ "ModelDataUrl": { "target": "com.amazonaws.sagemaker#Url", "traits": { - "smithy.api#documentation": "The S3 path where the model artifacts, which result from model training, are stored.\n This path must point to a single gzip compressed tar archive (.tar.gz suffix). The S3\n path is required for SageMaker built-in algorithms, but not if you use your own algorithms.\n For more information on built-in algorithms, see Common\n Parameters.
\nThe model artifacts must be in an S3 bucket that is in the same region as the\n model or endpoint you are creating.
\nIf you provide a value for this parameter, SageMaker uses Amazon Web Services Security Token\n Service to download model artifacts from the S3 path you provide. Amazon Web Services STS\n is activated in your IAM user account by default. If you previously deactivated\n Amazon Web Services STS for a region, you need to reactivate Amazon Web Services STS\n for that region. For more information, see Activating and\n Deactivating Amazon Web Services STS in an Amazon Web Services Region in the\n Amazon Web Services Identity and Access Management User\n Guide.
\nIf you use a built-in algorithm to create a model, SageMaker requires that you provide\n a S3 path to the model artifacts in ModelDataUrl
.
The S3 path where the model artifacts, which result from model training, are stored.\n This path must point to a single gzip compressed tar archive (.tar.gz suffix). The S3\n path is required for SageMaker built-in algorithms, but not if you use your own algorithms.\n For more information on built-in algorithms, see Common\n Parameters.
\nThe model artifacts must be in an S3 bucket that is in the same region as the\n model or endpoint you are creating.
\nIf you provide a value for this parameter, SageMaker uses Amazon Web Services Security Token\n Service to download model artifacts from the S3 path you provide. Amazon Web Services STS\n is activated in your IAM user account by default. If you previously deactivated\n Amazon Web Services STS for a region, you need to reactivate Amazon Web Services STS\n for that region. For more information, see Activating and\n Deactivating Amazon Web Services STS in an Amazon Web Services Region in the\n Amazon Web Services Identity and Access Management User\n Guide.
\nIf you use a built-in algorithm to create a model, SageMaker requires that you provide\n a S3 path to the model artifacts in ModelDataUrl
.
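The ContainerDefinition documentation above (Image and ModelDataUrl) is what a CreateModel request carries. A minimal, hypothetical sketch using the AWS SDK for Kotlin generated from these models; the model name, role ARN, image URI, and S3 path are placeholders, and the exact builder syntax is an assumption rather than something taken from this diff:

```kotlin
import aws.sdk.kotlin.services.sagemaker.SageMakerClient
import aws.sdk.kotlin.services.sagemaker.model.ContainerDefinition

suspend fun createExampleModel() {
    SageMakerClient { region = "us-west-2" }.use { sm ->
        val response = sm.createModel {
            modelName = "example-model"                                               // placeholder
            executionRoleArn = "arn:aws:iam::111122223333:role/ExampleSageMakerRole"  // placeholder role
            primaryContainer = ContainerDefinition {
                // Image may use the registry/repository[:tag] or registry/repository[@digest] form
                image = "111122223333.dkr.ecr.us-west-2.amazonaws.com/example-image:latest"
                // Must point to a single gzip-compressed tar archive (.tar.gz) in S3
                modelDataUrl = "s3://example-bucket/model/model.tar.gz"
            }
        }
        println(response.modelArn)
    }
}
```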
The scale that hyperparameter tuning uses to search the hyperparameter range. For\n information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:
\nAuto: SageMaker hyperparameter tuning chooses the best scale for the\n hyperparameter.
\nLinear: Hyperparameter tuning searches the values in the hyperparameter range by\n using a linear scale.
\nLogarithmic: Hyperparameter tuning searches the values in the hyperparameter range by\n using a logarithmic scale.
\nLogarithmic scaling works only for ranges that have only values greater\n than 0.
\nReverseLogarithmic: Hyperparameter tuning searches the values in the hyperparameter range by\n using a reverse logarithmic scale.
\nReverse logarithmic scaling works only for ranges that are entirely within\n the range 0<=x<1.0.\n
\nThe scale that hyperparameter tuning uses to search the hyperparameter range. For\n information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:
\nAuto: SageMaker hyperparameter tuning chooses the best scale for the\n hyperparameter.
\nLinear: Hyperparameter tuning searches the values in the hyperparameter range by\n using a linear scale.
\nLogarithmic: Hyperparameter tuning searches the values in the hyperparameter range by\n using a logarithmic scale.
\nLogarithmic scaling works only for ranges that have only values greater\n than 0.
\nReverseLogarithmic: Hyperparameter tuning searches the values in the hyperparameter range by\n using a reverse logarithmic scale.
\nReverse logarithmic scaling works only for ranges that are entirely within\n the range 0<=x<1.0.\n
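The scaling choices above correspond to the ScalingType member of a hyperparameter range. A small hypothetical sketch with the Kotlin SDK types; the hyperparameter name is made up and the Kotlin enum spelling for the Logarithmic value is an assumption:

```kotlin
import aws.sdk.kotlin.services.sagemaker.model.ContinuousParameterRange
import aws.sdk.kotlin.services.sagemaker.model.HyperParameterScalingType

// A learning-rate search space that spans several orders of magnitude,
// so a logarithmic scale usually fits better than a linear one.
val learningRateRange = ContinuousParameterRange {
    name = "learning_rate"      // hypothetical hyperparameter name
    minValue = "0.0001"         // range bounds are strings in this API
    maxValue = "0.1"
    scalingType = HyperParameterScalingType.Logarithmic // assumed enum spelling
}
```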
\nSpecifies details about training jobs run by this algorithm, including the\n following:
\nThe Amazon ECR path of the container and the version digest of the\n algorithm.
\nThe hyperparameters that the algorithm supports.
\nThe instance types that the algorithm supports for training.
\nWhether the algorithm supports distributed training.
\nThe metrics that the algorithm emits to Amazon CloudWatch.
\nWhich metrics that the algorithm emits can be used as the objective metric for\n hyperparameter tuning jobs.
\nThe input channels that the algorithm supports for training data. For example,\n an algorithm might support train
, validation
, and\n test
channels.
Specifies details about training jobs run by this algorithm, including the\n following:
\nThe Amazon ECR path of the container and the version digest of the\n algorithm.
\nThe hyperparameters that the algorithm supports.
\nThe instance types that the algorithm supports for training.
\nWhether the algorithm supports distributed training.
\nThe metrics that the algorithm emits to Amazon CloudWatch.
\nWhich metrics that the algorithm emits can be used as the objective metric for\n hyperparameter tuning jobs.
\nThe input channels that the algorithm supports for training data. For example,\n an algorithm might support train
, validation
, and\n test
channels.
Specifies details about inference jobs that the algorithm runs, including the\n following:
\nThe Amazon ECR paths of containers that contain the inference code and model\n artifacts.
\nThe instance types that the algorithm supports for transform jobs and\n real-time endpoints used for inference.
\nThe input and output content formats that the algorithm supports for\n inference.
\nSpecifies details about inference jobs that the algorithm runs, including the\n following:
\nThe Amazon ECR paths of containers that contain the inference code and model\n artifacts.
\nThe instance types that the algorithm supports for transform jobs and\n real-time endpoints used for inference.
\nThe input and output content formats that the algorithm supports for\n inference.
\nThe user profile name. If this value is not set, then SpaceName
must be set.
The user profile name.
" } }, "AppType": { @@ -6061,7 +6013,7 @@ "SpaceName": { "target": "com.amazonaws.sagemaker#SpaceName", "traits": { - "smithy.api#documentation": "The name of the space. If this value is not set, then UserProfileName
must be set.
The name of the space.
" } } } @@ -6256,7 +6208,7 @@ "target": "com.amazonaws.sagemaker#CreateCodeRepositoryOutput" }, "traits": { - "smithy.api#documentation": "Creates a Git repository as a resource in your SageMaker account. You can associate the\n repository with notebook instances so that you can use Git source control for the\n notebooks you create. The Git repository is a resource in your SageMaker account, so it can\n be associated with more than one notebook instance, and it persists independently from\n the lifecycle of any notebook instances it is associated with.
\nThe repository can be hosted either in Amazon Web Services CodeCommit\n or in any other Git repository.
" + "smithy.api#documentation": "Creates a Git repository as a resource in your SageMaker account. You can associate the\n repository with notebook instances so that you can use Git source control for the\n notebooks you create. The Git repository is a resource in your SageMaker account, so it can\n be associated with more than one notebook instance, and it persists independently from\n the lifecycle of any notebook instances it is associated with.
\nThe repository can be hosted either in Amazon Web Services CodeCommit\n or in any other Git repository.
" } }, "com.amazonaws.sagemaker#CreateCodeRepositoryInput": { @@ -6313,7 +6265,7 @@ } ], "traits": { - "smithy.api#documentation": "Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the\n resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify.
\nIf\n you choose to host your model using Amazon SageMaker hosting services, you can use the resulting\n model artifacts as part of the model. You can also use the artifacts with\n Amazon Web Services\n IoT Greengrass. In that case, deploy them as an ML\n resource.
\nIn the request body, you provide the following:
\nA name for the compilation job
\nInformation about the input model artifacts
\nThe output location for the compiled model and the device (target) that the\n model runs on
\nThe Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker assumes to perform\n the model compilation job.
\nYou can also provide a Tag
to track the model compilation job's resource\n use and costs. The response body contains the\n CompilationJobArn
\n for the compiled job.
To stop a model compilation job, use StopCompilationJob. To get\n information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model\n compilation jobs, use ListCompilationJobs.
" + "smithy.api#documentation": "Starts a model compilation job. After the model has been compiled, Amazon SageMaker saves the\n resulting model artifacts to an Amazon Simple Storage Service (Amazon S3) bucket that you specify.
\nIf\n you choose to host your model using Amazon SageMaker hosting services, you can use the resulting\n model artifacts as part of the model. You can also use the artifacts with\n Amazon Web Services\n IoT Greengrass. In that case, deploy them as an ML\n resource.
\nIn the request body, you provide the following:
\nA name for the compilation job
\nInformation about the input model artifacts
\nThe output location for the compiled model and the device (target) that the\n model runs on
\nThe Amazon Resource Name (ARN) of the IAM role that Amazon SageMaker assumes to perform\n the model compilation job.
\nYou can also provide a Tag
to track the model compilation job's resource\n use and costs. The response body contains the\n CompilationJobArn
\n for the compiled job.
To stop a model compilation job, use StopCompilationJob. To get\n information about a particular model compilation job, use DescribeCompilationJob. To get information about multiple model\n compilation jobs, use ListCompilationJobs.
" } }, "com.amazonaws.sagemaker#CreateCompilationJobRequest": { @@ -6329,7 +6281,7 @@ "RoleArn": { "target": "com.amazonaws.sagemaker#RoleArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on\n your behalf.
\nDuring model compilation, Amazon SageMaker needs your permission to:
\nRead input data from an S3 bucket
\nWrite model artifacts to an S3 bucket
\nWrite logs to Amazon CloudWatch Logs
\nPublish metrics to Amazon CloudWatch
\nYou grant permissions for all of these tasks to an IAM role. To pass this role to\n Amazon SageMaker, the caller of this API must have the iam:PassRole
permission. For\n more information, see Amazon SageMaker\n Roles.\n
The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on\n your behalf.
\nDuring model compilation, Amazon SageMaker needs your permission to:
\nRead input data from an S3 bucket
\nWrite model artifacts to an S3 bucket
\nWrite logs to Amazon CloudWatch Logs
\nPublish metrics to Amazon CloudWatch
\nYou grant permissions for all of these tasks to an IAM role. To pass this role to\n Amazon SageMaker, the caller of this API must have the iam:PassRole
permission. For\n more information, see Amazon SageMaker\n Roles.\n
If the action is successful, the service sends back an HTTP 200 response. Amazon SageMaker returns\n the following data in JSON format:
\n\n CompilationJobArn
: The Amazon Resource Name (ARN) of the compiled\n job.
If the action is successful, the service sends back an HTTP 200 response. Amazon SageMaker returns\n the following data in JSON format:
\n\n CompilationJobArn
: The Amazon Resource Name (ARN) of the compiled\n job.
Creates a Domain
used by Amazon SageMaker Studio. A domain consists of an associated\n Amazon Elastic File System (EFS) volume, a list of authorized users, and a variety of security, application,\n policy, and Amazon Virtual Private Cloud (VPC) configurations. An Amazon Web Services account is limited to one domain per region.\n Users within a domain can share notebook files and other artifacts with each other.
\n EFS storage\n
\nWhen a domain is created, an EFS volume is created for use by all of the users within the\n domain. Each user receives a private home directory within the EFS volume for notebooks,\n Git repositories, and data files.
\nSageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with\n an Amazon Web Services managed key by default. For more control, you can specify a\n customer managed key. For more information, see\n Protect Data at\n Rest Using Encryption.
\n\n VPC configuration\n
\nAll SageMaker Studio traffic between the domain and the EFS volume is through the specified\n VPC and subnets. For other Studio traffic, you can specify the AppNetworkAccessType
\n parameter. AppNetworkAccessType
corresponds to the network access type that you\n choose when you onboard to Studio. The following options are available:
\n PublicInternetOnly
- Non-EFS traffic goes through a VPC managed by\n Amazon SageMaker, which allows internet access. This is the default value.
\n VpcOnly
- All Studio traffic is through the specified VPC and subnets.\n Internet access is disabled by default. To allow internet access, you must specify a\n NAT gateway.
When internet access is disabled, you won't be able to run a Studio notebook or to\n train or host models unless your VPC has an interface endpoint to the SageMaker API and runtime\n or a NAT gateway and your security groups allow outbound connections.
\nNFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules\n in order to launch a SageMaker Studio app successfully.
\nFor more information, see\n Connect\n SageMaker Studio Notebooks to Resources in a VPC.
" + "smithy.api#documentation": "Creates a Domain
used by Amazon SageMaker Studio. A domain consists of an associated\n Amazon Elastic File System (EFS) volume, a list of authorized users, and a variety of security, application,\n policy, and Amazon Virtual Private Cloud (VPC) configurations. An Amazon Web Services account is limited to one domain per region.\n Users within a domain can share notebook files and other artifacts with each other.
\n EFS storage\n
\nWhen a domain is created, an EFS volume is created for use by all of the users within the\n domain. Each user receives a private home directory within the EFS volume for notebooks,\n Git repositories, and data files.
\nSageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with\n an Amazon Web Services managed key by default. For more control, you can specify a\n customer managed key. For more information, see\n Protect Data at\n Rest Using Encryption.
\n\n\n VPC configuration\n
\nAll SageMaker Studio traffic between the domain and the EFS volume is through the specified\n VPC and subnets. For other Studio traffic, you can specify the AppNetworkAccessType
\n parameter. AppNetworkAccessType
corresponds to the network access type that you\n choose when you onboard to Studio. The following options are available:
\n PublicInternetOnly
- Non-EFS traffic goes through a VPC managed by\n Amazon SageMaker, which allows internet access. This is the default value.
\n VpcOnly
- All Studio traffic is through the specified VPC and subnets.\n Internet access is disabled by default. To allow internet access, you must specify a\n NAT gateway.
When internet access is disabled, you won't be able to run a Studio notebook or to\n train or host models unless your VPC has an interface endpoint to the SageMaker API and runtime\n or a NAT gateway and your security groups allow outbound connections.
\nNFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules\n in order to launch a SageMaker Studio app successfully.
\nFor more information, see\n Connect\n SageMaker Studio Notebooks to Resources in a VPC.
" } }, "com.amazonaws.sagemaker#CreateDomainRequest": { @@ -6933,7 +6885,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates an endpoint using the endpoint configuration specified in the request. SageMaker\n uses the endpoint to provision resources and deploy models. You create the endpoint\n configuration with the CreateEndpointConfig API.
\nUse this API to deploy models using SageMaker hosting services.
\nFor an example that calls this method when deploying a model to SageMaker hosting services,\n see the Create Endpoint example notebook.\n
\n You must not delete an EndpointConfig
that is in use by an endpoint\n that is live or while the UpdateEndpoint
or CreateEndpoint
\n operations are being performed on the endpoint. To update an endpoint, you must\n create a new EndpointConfig
.
The endpoint name must be unique within an Amazon Web Services Region in your\n Amazon Web Services account.
\nWhen it receives the request, SageMaker creates the endpoint, launches the resources (ML\n compute instances), and deploys the model(s) on them.
\nWhen you call CreateEndpoint, a load call is made to DynamoDB to\n verify that your endpoint configuration exists. When you read data from a DynamoDB\n table supporting \n Eventually Consistent Reads
\n , the response might not\n reflect the results of a recently completed write operation. The response might\n include some stale data. If the dependent entities are not yet in DynamoDB, this\n causes a validation error. If you repeat your read request after a short time, the\n response should return the latest data. So retry logic is recommended to handle\n these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.
When SageMaker receives the request, it sets the endpoint status to\n Creating
. After it creates the endpoint, it sets the status to\n InService
. SageMaker can then process incoming requests for inferences. To\n check the status of an endpoint, use the DescribeEndpoint\n API.
If any of the models hosted at this endpoint get model data from an Amazon S3 location,\n SageMaker uses Amazon Web Services Security Token Service to download model artifacts from the\n S3 path you provided. Amazon Web Services STS is activated in your IAM user account by\n default. If you previously deactivated Amazon Web Services STS for a region, you need to\n reactivate Amazon Web Services STS for that region. For more information, see Activating and\n Deactivating Amazon Web Services STS in an Amazon Web Services Region in the\n Amazon Web Services Identity and Access Management User\n Guide.
\nTo add the IAM role policies for using this API operation, go to the IAM console, and choose\n Roles in the left navigation pane. Search the IAM role that you want to grant\n access to use the CreateEndpoint and CreateEndpointConfig API operations, add the following policies to\n the role.
\nOption 1: For a full SageMaker access, search and attach the\n AmazonSageMakerFullAccess
policy.
Option 2: For granting a limited access to an IAM role, paste the\n following Action elements manually into the JSON file of the IAM role:
\n\n \"Action\": [\"sagemaker:CreateEndpoint\",\n \"sagemaker:CreateEndpointConfig\"]
\n
\n \"Resource\": [
\n
\n \"arn:aws:sagemaker:region:account-id:endpoint/endpointName\"
\n
\n \"arn:aws:sagemaker:region:account-id:endpoint-config/endpointConfigName\"
\n
\n ]
\n
For more information, see SageMaker API\n Permissions: Actions, Permissions, and Resources\n Reference.
\nCreates an endpoint using the endpoint configuration specified in the request. SageMaker\n uses the endpoint to provision resources and deploy models. You create the endpoint\n configuration with the CreateEndpointConfig API.
\nUse this API to deploy models using SageMaker hosting services.
\nFor an example that calls this method when deploying a model to SageMaker hosting services,\n see the Create Endpoint example notebook.\n
\n You must not delete an EndpointConfig
that is in use by an endpoint\n that is live or while the UpdateEndpoint
or CreateEndpoint
\n operations are being performed on the endpoint. To update an endpoint, you must\n create a new EndpointConfig
.
The endpoint name must be unique within an Amazon Web Services Region in your\n Amazon Web Services account.
\nWhen it receives the request, SageMaker creates the endpoint, launches the resources (ML\n compute instances), and deploys the model(s) on them.
\n \nWhen you call CreateEndpoint, a load call is made to DynamoDB to\n verify that your endpoint configuration exists. When you read data from a DynamoDB\n table supporting \n Eventually Consistent Reads
\n , the response might not\n reflect the results of a recently completed write operation. The response might\n include some stale data. If the dependent entities are not yet in DynamoDB, this\n causes a validation error. If you repeat your read request after a short time, the\n response should return the latest data. So retry logic is recommended to handle\n these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.
When SageMaker receives the request, it sets the endpoint status to\n Creating
. After it creates the endpoint, it sets the status to\n InService
. SageMaker can then process incoming requests for inferences. To\n check the status of an endpoint, use the DescribeEndpoint\n API.
If any of the models hosted at this endpoint get model data from an Amazon S3 location,\n SageMaker uses Amazon Web Services Security Token Service to download model artifacts from the\n S3 path you provided. Amazon Web Services STS is activated in your IAM user account by\n default. If you previously deactivated Amazon Web Services STS for a region, you need to\n reactivate Amazon Web Services STS for that region. For more information, see Activating and\n Deactivating Amazon Web Services STS in an Amazon Web Services Region in the\n Amazon Web Services Identity and Access Management User\n Guide.
\nTo add the IAM role policies for using this API operation, go to the IAM console, and choose\n Roles in the left navigation pane. Search the IAM role that you want to grant\n access to use the CreateEndpoint and CreateEndpointConfig API operations, add the following policies to\n the role.
\nOption 1: For a full SageMaker access, search and attach the\n AmazonSageMakerFullAccess
policy.
Option 2: For granting a limited access to an IAM role, paste the\n following Action elements manually into the JSON file of the IAM role:
\n\n \"Action\": [\"sagemaker:CreateEndpoint\",\n \"sagemaker:CreateEndpointConfig\"]
\n
\n \"Resource\": [
\n
\n \"arn:aws:sagemaker:region:account-id:endpoint/endpointName\"
\n
\n \"arn:aws:sagemaker:region:account-id:endpoint-config/endpointConfigName\"
\n
\n ]
\n
For more information, see SageMaker API\n Permissions: Actions, Permissions, and Resources\n Reference.
\nCreates an endpoint configuration that SageMaker hosting services uses to deploy models. In\n the configuration, you identify one or more models, created using the\n CreateModel
API, to deploy and the resources that you want SageMaker to\n provision. Then you call the CreateEndpoint API.
Use this API if you want to use SageMaker hosting services to deploy models into\n production.
\nIn the request, you define a ProductionVariant
, for each model that you\n want to deploy. Each ProductionVariant
parameter also describes the\n resources that you want SageMaker to provision. This includes the number and type of ML\n compute instances to deploy.
If you are hosting multiple models, you also assign a VariantWeight
to\n specify how much traffic you want to allocate to each model. For example, suppose that\n you want to host two models, A and B, and you assign traffic weight 2 for model A and 1\n for model B. SageMaker distributes two-thirds of the traffic to Model A, and one-third to\n model B.
When you call CreateEndpoint, a load call is made to DynamoDB to\n verify that your endpoint configuration exists. When you read data from a DynamoDB\n table supporting \n Eventually Consistent Reads
\n , the response might not\n reflect the results of a recently completed write operation. The response might\n include some stale data. If the dependent entities are not yet in DynamoDB, this\n causes a validation error. If you repeat your read request after a short time, the\n response should return the latest data. So retry logic is recommended to handle\n these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.
Creates an endpoint configuration that SageMaker hosting services uses to deploy models. In\n the configuration, you identify one or more models, created using the\n CreateModel
API, to deploy and the resources that you want SageMaker to\n provision. Then you call the CreateEndpoint API.
Use this API if you want to use SageMaker hosting services to deploy models into\n production.
\nIn the request, you define a ProductionVariant
, for each model that you\n want to deploy. Each ProductionVariant
parameter also describes the\n resources that you want SageMaker to provision. This includes the number and type of ML\n compute instances to deploy.
If you are hosting multiple models, you also assign a VariantWeight
to\n specify how much traffic you want to allocate to each model. For example, suppose that\n you want to host two models, A and B, and you assign traffic weight 2 for model A and 1\n for model B. SageMaker distributes two-thirds of the traffic to Model A, and one-third to\n model B.
When you call CreateEndpoint, a load call is made to DynamoDB to\n verify that your endpoint configuration exists. When you read data from a DynamoDB\n table supporting \n Eventually Consistent Reads
\n , the response might not\n reflect the results of a recently completed write operation. The response might\n include some stale data. If the dependent entities are not yet in DynamoDB, this\n causes a validation error. If you repeat your read request after a short time, the\n response should return the latest data. So retry logic is recommended to handle\n these possible issues. We also recommend that customers call DescribeEndpointConfig before calling CreateEndpoint to minimize the potential impact of a DynamoDB eventually consistent read.
An array of ProductionVariant
objects, one for each model that you want\n to host at this endpoint.
An list of ProductionVariant
objects, one for each model that you want\n to host at this endpoint.
The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that\n SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that\n hosts the endpoint.
\nThe KmsKeyId can be any of the following formats:
\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN:\n arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
Alias name: alias/ExampleAlias
\n
Alias name ARN:\n arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias
\n
The KMS key policy must grant permission to the IAM role that you specify in your\n CreateEndpoint
, UpdateEndpoint
requests. For more\n information, refer to the Amazon Web Services Key Management Service section Using Key\n Policies in Amazon Web Services KMS \n
Certain Nitro-based instances include local storage, dependent on the instance\n type. Local storage volumes are encrypted using a hardware module on the instance.\n You can't request a KmsKeyId
when using an instance type with local\n storage. If any of the models that you specify in the\n ProductionVariants
parameter use nitro-based instances with local\n storage, do not specify a value for the KmsKeyId
parameter. If you\n specify a value for KmsKeyId
when using any nitro-based instances with\n local storage, the call to CreateEndpointConfig
fails.
For a list of instance types that support local instance storage, see Instance Store Volumes.
\nFor more information about local instance storage encryption, see SSD\n Instance Store Volumes.
\nThe Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that\n SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that\n hosts the endpoint.
\nThe KmsKeyId can be any of the following formats:
\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN:\n arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
Alias name: alias/ExampleAlias
\n
Alias name ARN:\n arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias
\n
The KMS key policy must grant permission to the IAM role that you specify in your\n CreateEndpoint
, UpdateEndpoint
requests. For more\n information, refer to the Amazon Web Services Key Management Service section Using Key\n Policies in Amazon Web Services KMS \n
Certain Nitro-based instances include local storage, dependent on the instance\n type. Local storage volumes are encrypted using a hardware module on the instance.\n You can't request a KmsKeyId
when using an instance type with local\n storage. If any of the models that you specify in the\n ProductionVariants
parameter use nitro-based instances with local\n storage, do not specify a value for the KmsKeyId
parameter. If you\n specify a value for KmsKeyId
when using any nitro-based instances with\n local storage, the call to CreateEndpointConfig
fails.
For a list of instance types that support local instance storage, see Instance Store Volumes.
\nFor more information about local instance storage encryption, see SSD\n Instance Store Volumes.
\nAn array of ProductionVariant
objects, one for each model that you want\n to host at this endpoint in shadow mode with production traffic replicated from the\n model specified on ProductionVariants
. If you use this field, you can only\n specify one variant for ProductionVariants
and one variant for\n ShadowProductionVariants
.
\n Array of ProductionVariant
objects. There is one for each model that you want to host at this\n endpoint in shadow mode with production traffic replicated from the model specified on\n ProductionVariants
.If you use this field, you can only specify one variant for\n ProductionVariants
and one variant for ShadowProductionVariants
.\n
Creates a SageMaker experiment. An experiment is a collection of\n trials that are observed, compared and evaluated as a group. A trial is\n a set of steps, called trial components, that produce a machine learning\n model.
\nIn the Studio UI, trials are referred to as run groups and trial\n components are referred to as runs.
\nThe goal of an experiment is to determine the components that produce the best model.\n Multiple trials are performed, each one isolating and measuring the impact of a change to one\n or more inputs, while keeping the remaining inputs constant.
\nWhen you use SageMaker Studio or the SageMaker Python SDK, all experiments, trials, and trial\n components are automatically tracked, logged, and indexed. When you use the Amazon Web Services SDK for Python (Boto), you\n must use the logging APIs provided by the SDK.
\nYou can add tags to experiments, trials, trial components and then use the Search API to search for the tags.
\nTo add a description to an experiment, specify the optional Description
\n parameter. To add a description later, or to change the description, call the UpdateExperiment API.
To get a list of all your experiments, call the ListExperiments API. To\n view an experiment's properties, call the DescribeExperiment API. To get a\n list of all the trials associated with an experiment, call the ListTrials\n API. To create a trial call the CreateTrial API.
" + "smithy.api#documentation": "Creates an SageMaker experiment. An experiment is a collection of\n trials that are observed, compared and evaluated as a group. A trial is\n a set of steps, called trial components, that produce a machine learning\n model.
\nThe goal of an experiment is to determine the components that produce the best model.\n Multiple trials are performed, each one isolating and measuring the impact of a change to one\n or more inputs, while keeping the remaining inputs constant.
\nWhen you use SageMaker Studio or the SageMaker Python SDK, all experiments, trials, and trial\n components are automatically tracked, logged, and indexed. When you use the Amazon Web Services SDK for Python (Boto), you\n must use the logging APIs provided by the SDK.
\nYou can add tags to experiments, trials, trial components and then use the Search API to search for the tags.
\nTo add a description to an experiment, specify the optional Description
\n parameter. To add a description later, or to change the description, call the UpdateExperiment API.
To get a list of all your experiments, call the ListExperiments API. To\n view an experiment's properties, call the DescribeExperiment API. To get a\n list of all the trials associated with an experiment, call the ListTrials\n API. To create a trial call the CreateTrial API.
" } }, "com.amazonaws.sagemaker#CreateExperimentRequest": { @@ -7175,7 +7127,7 @@ "OfflineStoreConfig": { "target": "com.amazonaws.sagemaker#OfflineStoreConfig", "traits": { - "smithy.api#documentation": "Use this to configure an OfflineFeatureStore
. This parameter allows you to\n specify:
The Amazon Simple Storage Service (Amazon S3) location of an\n OfflineStore
.
A configuration for an Amazon Web Services Glue or Amazon Web Services Hive data catalog.
\nAn KMS encryption key to encrypt the Amazon S3 location used for\n OfflineStore
. If KMS encryption key is not specified, by default we encrypt all data at rest using \n Amazon Web Services KMS key. By defining your bucket-level key for SSE, \n you can reduce Amazon Web Services KMS requests costs by up to 99 percent.
Format for the offline store table. Supported formats are Glue (Default) and Apache Iceberg.
\nTo learn more about this parameter, see OfflineStoreConfig.
" + "smithy.api#documentation": "Use this to configure an OfflineFeatureStore
. This parameter allows you to\n specify:
The Amazon Simple Storage Service (Amazon S3) location of an\n OfflineStore
.
A configuration for an Amazon Web Services Glue or Amazon Web Services Hive data catalog.
\nAn KMS encryption key to encrypt the Amazon S3 location used for\n OfflineStore
. If KMS encryption key is not specified, by default we encrypt all data at rest using \n Amazon Web Services KMS key. By defining your bucket-level key for SSE, \n you can reduce Amazon Web Services KMS requests costs by up to 99 percent.
To learn more about this parameter, see OfflineStoreConfig.
" } }, "RoleArn": { @@ -7441,7 +7393,7 @@ } ], "traits": { - "smithy.api#documentation": "Starts a hyperparameter tuning job. A hyperparameter tuning job finds the best version\n of a model by running many training jobs on your dataset using the algorithm you choose\n and values for hyperparameters within ranges that you specify. It then chooses the\n hyperparameter values that result in a model that performs the best, as measured by an\n objective metric that you choose.
\nA hyperparameter tuning job automatically creates Amazon SageMaker experiments, trials, and\n trial components for each training job that it runs. You can view these entities in\n Amazon SageMaker Studio. For more information, see View\n Experiments, Trials, and Trial Components.
\nDo not include any security-sensitive information including account access IDs,\n secrets or tokens in any hyperparameter field. If the use of security-sensitive\n credentials are detected, SageMaker will reject your training job request and return an\n exception error.
\nStarts a hyperparameter tuning job. A hyperparameter tuning job finds the best version\n of a model by running many training jobs on your dataset using the algorithm you choose\n and values for hyperparameters within ranges that you specify. It then chooses the\n hyperparameter values that result in a model that performs the best, as measured by an\n objective metric that you choose.
\nA hyperparameter tuning job automatically creates Amazon SageMaker experiments, trials, and\n trial components for each training job that it runs. You can view these entities in\n Amazon SageMaker Studio. For more information, see View\n Experiments, Trials, and Trial Components.
\nDo not include any security-sensitive information including account access IDs,\n secrets or tokens in any hyperparameter field. If the use of security-sensitive\n credentials are detected, SageMaker will reject your training job request and return an\n exception error.
\nSpecifies the configuration for starting the hyperparameter tuning job using one or\n more previous tuning jobs as a starting point. The results of previous tuning jobs are\n used to inform which combinations of hyperparameters to search over in the new tuning\n job.
\nAll training jobs launched by the new hyperparameter tuning job are evaluated by using\n the objective metric. If you specify IDENTICAL_DATA_AND_ALGORITHM
as the\n WarmStartType
value for the warm start configuration, the training job\n that performs the best in the new tuning job is compared to the best training jobs from\n the parent tuning jobs. From these, the training job that performs the best as measured\n by the objective metric is returned as the overall best training job.
All training jobs launched by parent hyperparameter tuning jobs and the new\n hyperparameter tuning jobs count against the limit of training jobs for the tuning\n job.
\nSpecifies the configuration for starting the hyperparameter tuning job using one or\n more previous tuning jobs as a starting point. The results of previous tuning jobs are\n used to inform which combinations of hyperparameters to search over in the new tuning\n job.
\nAll training jobs launched by the new hyperparameter tuning job are evaluated by using\n the objective metric. If you specify IDENTICAL_DATA_AND_ALGORITHM
as the\n WarmStartType
value for the warm start configuration, the training job\n that performs the best in the new tuning job is compared to the best training jobs from\n the parent tuning jobs. From these, the training job that performs the best as measured\n by the objective metric is returned as the overall best training job.
All training jobs launched by parent hyperparameter tuning jobs and the new\n hyperparameter tuning jobs count against the limit of training jobs for the tuning\n job.
\nAn array of key-value pairs. You can use tags to categorize your Amazon Web Services\n resources in different ways, for example, by purpose, owner, or environment. For more\n information, see Tagging Amazon Web Services Resources.
\nTags that you specify for the tuning job are also added to all training jobs that the\n tuning job launches.
" + "smithy.api#documentation": "An array of key-value pairs. You can use tags to categorize your Amazon Web Services\n resources in different ways, for example, by purpose, owner, or environment. For more\n information, see Tagging Amazon Web Services Resources.
\nTags that you specify for the tuning job are also added to all training jobs that the\n tuning job launches.
" } } } @@ -7544,7 +7496,7 @@ "RoleArn": { "target": "com.amazonaws.sagemaker#RoleArn", "traits": { - "smithy.api#documentation": "The ARN of an IAM role that enables Amazon SageMaker to perform tasks on your behalf.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of an IAM role that enables Amazon SageMaker to perform tasks on your behalf.
", "smithy.api#required": {} } }, @@ -7562,7 +7514,7 @@ "ImageArn": { "target": "com.amazonaws.sagemaker#ImageArn", "traits": { - "smithy.api#documentation": "The ARN of the image.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the image.
" } } } @@ -7614,55 +7566,6 @@ "smithy.api#documentation": "The ImageName
of the Image
to create a version of.
A list of aliases created with the image version.
" - } - }, - "VendorGuidance": { - "target": "com.amazonaws.sagemaker#VendorGuidance", - "traits": { - "smithy.api#documentation": "The stability of the image version, specified by the maintainer.
\n\n NOT_PROVIDED
: The maintainers did not provide a status for image version stability.
\n STABLE
: The image version is stable.
\n TO_BE_ARCHIVED
: The image version is set to be archived. Custom image versions that are set to be archived are automatically archived after three months.
\n ARCHIVED
: The image version is archived. Archived image versions are not searchable and are no longer actively supported.
Indicates SageMaker job type compatibility.
\n\n TRAINING
: The image version is compatible with SageMaker training jobs.
\n INFERENCE
: The image version is compatible with SageMaker inference jobs.
\n NOTEBOOK_KERNEL
: The image version is compatible with SageMaker notebook kernels.
The machine learning framework vended in the image version.
" - } - }, - "ProgrammingLang": { - "target": "com.amazonaws.sagemaker#ProgrammingLang", - "traits": { - "smithy.api#documentation": "The supported programming language and its version.
" - } - }, - "Processor": { - "target": "com.amazonaws.sagemaker#Processor", - "traits": { - "smithy.api#documentation": "Indicates CPU or GPU compatibility.
\n\n CPU
: The image version is compatible with CPU.
\n GPU
: The image version is compatible with GPU.
Indicates Horovod compatibility.
" - } - }, - "ReleaseNotes": { - "target": "com.amazonaws.sagemaker#ReleaseNotes", - "traits": { - "smithy.api#documentation": "The maintainer description of the image version.
" - } } } }, @@ -7672,7 +7575,7 @@ "ImageVersionArn": { "target": "com.amazonaws.sagemaker#ImageVersionArn", "traits": { - "smithy.api#documentation": "The ARN of the image version.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the image version.
" } } } @@ -7694,7 +7597,7 @@ } ], "traits": { - "smithy.api#documentation": "\n Creates an inference experiment using the configurations specified in the request.\n
\n\n Use this API to setup and schedule an experiment to compare model variants on a Amazon SageMaker inference endpoint. For\n more information about inference experiments, see Shadow tests.\n
\n\n Amazon SageMaker begins your experiment at the scheduled time and routes traffic to your endpoint's model variants based\n on your specified configuration.\n
\n\n While the experiment is in progress or after it has concluded, you can view metrics that compare your model\n variants. For more information, see View, monitor, and edit shadow tests.\n
" + "smithy.api#documentation": "\n Creates an inference experiment using the configurations specified in the request.\n
\n\n Use this API to schedule an experiment to compare model variants on a Amazon SageMaker inference endpoint. For more\n information about inference experiments, see Shadow tests.\n
\n\n Amazon SageMaker begins your experiment at the scheduled time and routes traffic to your endpoint's model variants based\n on your specified configuration.\n
\n\n While the experiment is in progress or after it has concluded, you can view metrics that compare your model\n variants. For more information, see View, monitor, and edit shadow tests.\n
" } }, "com.amazonaws.sagemaker#CreateInferenceExperimentRequest": { @@ -7710,14 +7613,14 @@ "Type": { "target": "com.amazonaws.sagemaker#InferenceExperimentType", "traits": { - "smithy.api#documentation": "\n The type of the inference experiment that you want to run. The following types of experiments are possible:\n
\n\n ShadowMode
: You can use this type to validate a shadow variant. For more information,\n see Shadow tests.\n
\n The type of the inference experiment that you want to run. The following types of experiments are possible:\n
\n\n ShadowMode
: You can use this type to validate a shadow variant. For more information,\n see Shadow tests.\n
\n The duration for which you want the inference experiment to run. If you don't specify this field, the\n experiment automatically starts immediately upon creation and concludes after 7 days.\n
" + "smithy.api#documentation": "\n The duration for which you want the inference experiment to run. If you don't specify this field, the\n experiment automatically concludes after 7 days.\n
" } }, "Description": { @@ -7729,7 +7632,7 @@ "RoleArn": { "target": "com.amazonaws.sagemaker#RoleArn", "traits": { - "smithy.api#documentation": "\n The ARN of the IAM role that Amazon SageMaker can assume to access model artifacts and container images, and manage\n Amazon SageMaker Inference endpoints for model deployment.\n
", + "smithy.api#documentation": "\n The ARN of the IAM role that Amazon SageMaker can assume to access model artifacts and container images.\n
", "smithy.api#required": {} } }, @@ -7743,27 +7646,27 @@ "ModelVariants": { "target": "com.amazonaws.sagemaker#ModelVariantConfigList", "traits": { - "smithy.api#documentation": "\n An array of ModelVariantConfig
objects. There is one for each variant in the inference\n experiment. Each ModelVariantConfig
object in the array describes the infrastructure\n configuration for the corresponding variant.\n
\n Array of ModelVariantConfigSummary
objects. There is one for each variant in the inference\n experiment. Each ModelVariantConfigSummary
object in the array describes the infrastructure\n configuration for the corresponding variant.\n
\n The Amazon S3 location and configuration for storing inference request and response data.\n
\n\n This is an optional parameter that you can use for data capture. For more information, see Capture data.\n
" + "smithy.api#documentation": "\n The storage configuration for the inference experiment. This is an optional parameter that you can use for\n data capture. For more information, see Capture data.\n
" } }, "ShadowModeConfig": { "target": "com.amazonaws.sagemaker#ShadowModeConfig", "traits": { - "smithy.api#documentation": "\n The configuration of ShadowMode
inference experiment type. Use this field to specify a\n production variant which takes all the inference requests, and a shadow variant to which Amazon SageMaker replicates a\n percentage of the inference requests. For the shadow variant also specify the percentage of requests that\n Amazon SageMaker replicates.\n
\n Shows which variant is the production variant and which variant is the shadow variant. For the shadow\n variant, also shows the sampling percentage.\n
", "smithy.api#required": {} } }, "KmsKey": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "\n The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on\n the storage volume attached to the ML compute instance that hosts the endpoint. The KmsKey
can\n be any of the following formats:\n
KMS key ID
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
Amazon Resource Name (ARN) of a KMS key
\n\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
KMS key Alias
\n\n \"alias/ExampleAlias\"
\n
Amazon Resource Name (ARN) of a KMS key Alias
\n\n \"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\"
\n
\n If you use a KMS key ID or an alias of your KMS key, the Amazon SageMaker execution role must include permissions to\n call kms:Encrypt
. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for\n your role's account. Amazon SageMaker uses server-side encryption with KMS managed keys for\n OutputDataConfig
. If you use a bucket policy with an s3:PutObject
permission that\n only allows objects with server-side encryption, set the condition key of\n s3:x-amz-server-side-encryption
to \"aws:kms\"
. For more information, see KMS managed Encryption Keys\n in the Amazon Simple Storage Service Developer Guide.\n
\n The KMS key policy must grant permission to the IAM role that you specify in your\n CreateEndpoint
and UpdateEndpoint
requests. For more information, see Using Key Policies in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer\n Guide.\n
\n The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt\n data on the storage volume attached to the ML compute instance that hosts the endpoint. The KmsKey
\n can be any of the following formats:\n
KMS key ID
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
Amazon Resource Name (ARN) of a KMS key
\n\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
KMS key Alias
\n\n \"alias/ExampleAlias\"
\n
Amazon Resource Name (ARN) of a KMS key Alias
\n\n \"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\"
\n
\n If you use a KMS key ID or an alias of your KMS key, the Amazon SageMaker execution role must include permissions to\n call kms:Encrypt
. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for\n your role's account. Amazon SageMaker uses server-side encryption with KMS managed keys for\n OutputDataConfig
. If you use a bucket policy with an s3:PutObject
permission that\n only allows objects with server-side encryption, set the condition key of\n s3:x-amz-server-side-encryption
to \"aws:kms\"
. For more information, see KMS managed Encryption Keys\n in the Amazon Simple Storage Service Developer Guide.\n
\n The KMS key policy must grant permission to the IAM role that you specify in your\n CreateEndpoint
and UpdateEndpoint
requests. For more information, see Using Key Policies in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer\n Guide.\n
Creates a job that uses workers to label the data objects in your input dataset. You\n can use the labeled data to train machine learning models.
\nYou can select your workforce from one of three providers:
\nA private workforce that you create. It can include employees, contractors,\n and outside experts. Use a private workforce when want the data to stay within\n your organization or when a specific set of skills is required.
\nOne or more vendors that you select from the Amazon Web Services Marketplace. Vendors provide\n expertise in specific areas.
\nThe Amazon Mechanical Turk workforce. This is the largest workforce, but it\n should only be used for public data or data that has been stripped of any\n personally identifiable information.
\nYou can also use automated data labeling to reduce the number of\n data objects that need to be labeled by a human. Automated data labeling uses\n active learning to determine if a data object can be labeled by\n machine or if it needs to be sent to a human worker. For more information, see Using\n Automated Data Labeling.
\nThe data objects to be labeled are contained in an Amazon S3 bucket. You create a\n manifest file that describes the location of each object. For\n more information, see Using Input and Output Data.
\nThe output can be used as the manifest file for another labeling job or as training\n data for your machine learning models.
\nYou can use this operation to create a static labeling job or a streaming labeling\n job. A static labeling job stops if all data objects in the input manifest file\n identified in ManifestS3Uri
have been labeled. A streaming labeling job\n runs perpetually until it is manually stopped, or remains idle for 10 days. You can send\n new data objects to an active (InProgress
) streaming labeling job in real\n time. To learn how to create a static labeling job, see Create a Labeling Job\n (API) in the Amazon SageMaker Developer Guide. To learn how to create a streaming\n labeling job, see Create a Streaming Labeling\n Job.
Creates a job that uses workers to label the data objects in your input dataset. You\n can use the labeled data to train machine learning models.
\nYou can select your workforce from one of three providers:
\nA private workforce that you create. It can include employees, contractors,\n and outside experts. Use a private workforce when want the data to stay within\n your organization or when a specific set of skills is required.
\nOne or more vendors that you select from the Amazon Web Services Marketplace. Vendors provide\n expertise in specific areas.
\nThe Amazon Mechanical Turk workforce. This is the largest workforce, but it\n should only be used for public data or data that has been stripped of any\n personally identifiable information.
\nYou can also use automated data labeling to reduce the number of\n data objects that need to be labeled by a human. Automated data labeling uses\n active learning to determine if a data object can be labeled by\n machine or if it needs to be sent to a human worker. For more information, see Using\n Automated Data Labeling.
\nThe data objects to be labeled are contained in an Amazon S3 bucket. You create a\n manifest file that describes the location of each object. For\n more information, see Using Input and Output Data.
\nThe output can be used as the manifest file for another labeling job or as training\n data for your machine learning models.
\nYou can use this operation to create a static labeling job or a streaming labeling\n job. A static labeling job stops if all data objects in the input manifest file\n identified in ManifestS3Uri
have been labeled. A streaming labeling job\n runs perpetually until it is manually stopped, or remains idle for 10 days. You can send\n new data objects to an active (InProgress
) streaming labeling job in real\n time. To learn how to create a static labeling job, see Create a Labeling Job\n (API) in the Amazon SageMaker Developer Guide. To learn how to create a streaming\n labeling job, see Create a Streaming Labeling\n Job.
The attribute name to use for the label in the output manifest file. This is the key\n for the key/value pair formed with the label that a worker assigns to the object. The\n LabelAttributeName
must meet the following requirements.
The name can't end with \"-metadata\".
\nIf you are using one of the following built-in task types,\n the attribute name must end with \"-ref\". If the task type\n you are using is not listed below, the attribute name must\n not end with \"-ref\".
\nImage semantic segmentation (SemanticSegmentation)
, and\n adjustment (AdjustmentSemanticSegmentation
) and\n verification (VerificationSemanticSegmentation
) labeling\n jobs for this task type.
Video frame object detection (VideoObjectDetection
), and\n adjustment and verification\n (AdjustmentVideoObjectDetection
) labeling jobs for this\n task type.
Video frame object tracking (VideoObjectTracking
), and\n adjustment and verification (AdjustmentVideoObjectTracking
)\n labeling jobs for this task type.
3D point cloud semantic segmentation\n (3DPointCloudSemanticSegmentation
), and adjustment and\n verification (Adjustment3DPointCloudSemanticSegmentation
)\n labeling jobs for this task type.
3D point cloud object tracking\n (3DPointCloudObjectTracking
), and adjustment and\n verification (Adjustment3DPointCloudObjectTracking
)\n labeling jobs for this task type.
If you are creating an adjustment or verification labeling job, you must use a\n different\n LabelAttributeName
than the one used in the original labeling job. The\n original labeling job is the Ground Truth labeling job that produced the labels that you\n want verified or adjusted. To learn more about adjustment and verification labeling\n jobs, see Verify and Adjust\n Labels.
The attribute name to use for the label in the output manifest file. This is the key\n for the key/value pair formed with the label that a worker assigns to the object. The\n LabelAttributeName
must meet the following requirements.
The name can't end with \"-metadata\".
\nIf you are using one of the following built-in task types,\n the attribute name must end with \"-ref\". If the task type\n you are using is not listed below, the attribute name must\n not end with \"-ref\".
\nImage semantic segmentation (SemanticSegmentation)
, and\n adjustment (AdjustmentSemanticSegmentation
) and\n verification (VerificationSemanticSegmentation
) labeling\n jobs for this task type.
Video frame object detection (VideoObjectDetection
), and\n adjustment and verification\n (AdjustmentVideoObjectDetection
) labeling jobs for this\n task type.
Video frame object tracking (VideoObjectTracking
), and\n adjustment and verification (AdjustmentVideoObjectTracking
)\n labeling jobs for this task type.
3D point cloud semantic segmentation\n (3DPointCloudSemanticSegmentation
), and adjustment and\n verification (Adjustment3DPointCloudSemanticSegmentation
)\n labeling jobs for this task type.
3D point cloud object tracking\n (3DPointCloudObjectTracking
), and adjustment and\n verification (Adjustment3DPointCloudObjectTracking
)\n labeling jobs for this task type.
If you are creating an adjustment or verification labeling job, you must use a\n different\n LabelAttributeName
than the one used in the original labeling job. The\n original labeling job is the Ground Truth labeling job that produced the labels that you\n want verified or adjusted. To learn more about adjustment and verification labeling\n jobs, see Verify and Adjust\n Labels.
Input data for the labeling job, such as the Amazon S3 location of the data objects and the\n location of the manifest file that describes the data objects.
\nYou must specify at least one of the following: S3DataSource
or\n SnsDataSource
.
Use SnsDataSource
to specify an SNS input topic for a streaming\n labeling job. If you do not specify and SNS input topic ARN, Ground Truth will\n create a one-time labeling job that stops after all data objects in the input\n manifest file have been labeled.
Use S3DataSource
to specify an input manifest file for both\n streaming and one-time labeling jobs. Adding an S3DataSource
is\n optional if you use SnsDataSource
to create a streaming labeling\n job.
If you use the Amazon Mechanical Turk workforce, your input data should not include\n confidential information, personal information or protected health information. Use\n ContentClassifiers
to specify that your data is free of personally\n identifiable information and adult content.
Input data for the labeling job, such as the Amazon S3 location of the data objects and the\n location of the manifest file that describes the data objects.
\nYou must specify at least one of the following: S3DataSource
or\n SnsDataSource
.
Use SnsDataSource
to specify an SNS input topic for a streaming\n labeling job. If you do not specify and SNS input topic ARN, Ground Truth will\n create a one-time labeling job that stops after all data objects in the input\n manifest file have been labeled.
Use S3DataSource
to specify an input manifest file for both\n streaming and one-time labeling jobs. Adding an S3DataSource
is\n optional if you use SnsDataSource
to create a streaming labeling\n job.
If you use the Amazon Mechanical Turk workforce, your input data should not include\n confidential information, personal information or protected health information. Use\n ContentClassifiers
to specify that your data is free of personally\n identifiable information and adult content.
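For illustration only, here is a minimal sketch (Python, Boto 3 conventions) of the input configuration described above; the bucket, manifest, and topic names are placeholders, not values taken from this model:

    # Sketch of a LabelingJobInputConfig for a one-time (non-streaming) labeling job.
    # S3DataSource points at the input manifest; DataAttributes declares that the data
    # is free of PII and adult content when using the Mechanical Turk workforce.
    input_config = {
        "DataSource": {
            "S3DataSource": {
                "ManifestS3Uri": "s3://example-bucket/manifests/input.manifest"  # placeholder
            }
            # For a streaming job you would instead (or additionally) provide:
            # "SnsDataSource": {"SnsTopicArn": "arn:aws:sns:us-east-1:111122223333:example-topic"}
        },
        "DataAttributes": {
            "ContentClassifiers": [
                "FreeOfPersonallyIdentifiableInformation",
                "FreeOfAdultContent",
            ]
        },
    }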
The S3 URI of the file, referred to as a label category configuration\n file, that defines the categories used to label the data objects.
\nFor 3D point cloud and video frame task types, you can add label category attributes\n and frame attributes to your label category configuration file. To learn how, see Create a\n Labeling Category Configuration File for 3D Point Cloud Labeling Jobs.
\nFor named entity recognition jobs, in addition to \"labels\", you must\n provide worker instructions in the label category configuration file using the\n \"instructions\" parameter: \"instructions\":\n {\"shortInstruction\":\"Add header Add Instructions\", \"fullInstruction\":\"Add additional instructions.\"}. For details\n and an example, see Create a\n Named Entity Recognition Labeling Job (API).
For all other built-in task types and custom\n tasks, your label category configuration file must be a JSON file in the\n following format. Identify the labels you want to use by replacing label_1
,\n label_2
,...
,label_n
with your label\n categories.
\n {
\n
\n \"document-version\": \"2018-11-28\",
\n
\n \"labels\": [{\"label\": \"label_1\"},{\"label\": \"label_2\"},...{\"label\":\n \"label_n\"}]
\n
\n }
\n
Note the following about the label category configuration file:
\nFor image classification and text classification (single and multi-label) you\n must specify at least two label categories. For all other task types, the\n minimum number of label categories required is one.
\nEach label category must be unique; you cannot specify duplicate label\n categories.
\nIf you create a 3D point cloud or video frame adjustment or verification\n labeling job, you must include auditLabelAttributeName
in the label\n category configuration. Use this parameter to enter the \n LabelAttributeName
\n of the labeling job you want to\n adjust or verify annotations of.
The S3 URI of the file, referred to as a label category configuration\n file, that defines the categories used to label the data objects.
\nFor 3D point cloud and video frame task types, you can add label category attributes\n and frame attributes to your label category configuration file. To learn how, see Create a\n Labeling Category Configuration File for 3D Point Cloud Labeling Jobs.
\nFor named entity recognition jobs, in addition to \"labels\", you must\n provide worker instructions in the label category configuration file using the\n \"instructions\" parameter: \"instructions\":\n {\"shortInstruction\":\"Add header Add Instructions\", \"fullInstruction\":\"Add additional instructions.\"}. For details\n and an example, see Create a\n Named Entity Recognition Labeling Job (API).
For all other built-in task types and custom\n tasks, your label category configuration file must be a JSON file in the\n following format. Identify the labels you want to use by replacing label_1
,\n label_2
,...
,label_n
with your label\n categories.
\n {
\n
\n \"document-version\": \"2018-11-28\",
\n
\n \"labels\": [{\"label\": \"label_1\"},{\"label\": \"label_2\"},...{\"label\":\n \"label_n\"}]
\n
\n }
\n
Note the following about the label category configuration file:
\nFor image classification and text classification (single and multi-label) you\n must specify at least two label categories. For all other task types, the\n minimum number of label categories required is one.
\nEach label category must be unique; you cannot specify duplicate label\n categories.
\nIf you create a 3D point cloud or video frame adjustment or verification\n labeling job, you must include auditLabelAttributeName
in the label\n category configuration. Use this parameter to enter the \n LabelAttributeName
\n of the labeling job you want to\n adjust or verify annotations of.
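To make the file format above concrete, a small sketch in Python that builds a label category configuration file with two example categories and uploads it to S3; the bucket, key, and label names are placeholders:

    import json

    import boto3

    # Minimal label category configuration file in the documented format.
    label_category_config = {
        "document-version": "2018-11-28",
        "labels": [{"label": "cat"}, {"label": "dog"}],  # placeholder categories
        # Named entity recognition jobs additionally require an "instructions" object here.
    }

    # Upload the file; its S3 URI is the value you pass as the label category configuration file location.
    s3 = boto3.client("s3")
    s3.put_object(
        Bucket="example-bucket",  # placeholder
        Key="configs/label-categories.json",
        Body=json.dumps(label_category_config),
    )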
Creates a model in SageMaker. In the request, you name the model and describe a primary\n container. For the primary container, you specify the Docker image that\n contains inference code, artifacts (from prior training), and a custom environment map\n that the inference code uses when you deploy the model for predictions.
\nUse this API to create a model if you want to use SageMaker hosting services or run a batch\n transform job.
\nTo host your model, you create an endpoint configuration with the\n CreateEndpointConfig
API, and then create an endpoint with the\n CreateEndpoint
API. SageMaker then deploys all of the containers that you\n defined for the model in the hosting environment.
For an example that calls this method when deploying a model to SageMaker hosting services,\n see Create a Model (Amazon Web Services SDK for Python (Boto 3)).\n
\nTo run a batch transform using your model, you start a job with the\n CreateTransformJob
API. SageMaker uses your model and your dataset to get\n inferences which are then saved to a specified S3 location.
In the request, you also provide an IAM role that SageMaker can assume to access model\n artifacts and the Docker image for deployment on ML compute hosting instances or for batch\n transform jobs. In addition, you also use the IAM role to manage permissions the\n inference code needs. For example, if the inference code accesses any other Amazon Web Services resources, you grant the necessary permissions via this role.
" + "smithy.api#documentation": "Creates a model in SageMaker. In the request, you name the model and describe a primary\n container. For the primary container, you specify the Docker image that\n contains inference code, artifacts (from prior training), and a custom environment map\n that the inference code uses when you deploy the model for predictions.
\nUse this API to create a model if you want to use SageMaker hosting services or run a batch\n transform job.
\nTo host your model, you create an endpoint configuration with the\n CreateEndpointConfig
API, and then create an endpoint with the\n CreateEndpoint
API. SageMaker then deploys all of the containers that you\n defined for the model in the hosting environment.
For an example that calls this method when deploying a model to SageMaker hosting services,\n see Create a Model (Amazon Web Services SDK for Python (Boto 3)).\n
\nTo run a batch transform using your model, you start a job with the\n CreateTransformJob
API. SageMaker uses your model and your dataset to get\n inferences which are then saved to a specified S3 location.
In the request, you also provide an IAM role that SageMaker can assume to access model\n artifacts and docker image for deployment on ML compute hosting instances or for batch\n transform jobs. In addition, you also use the IAM role to manage permissions the\n inference code needs. For example, if the inference code access any other Amazon Web Services resources, you grant necessary permissions via this role.
" } }, "com.amazonaws.sagemaker#CreateModelBiasJobDefinition": { @@ -8360,7 +8263,7 @@ "ExecutionRoleArn": { "target": "com.amazonaws.sagemaker#RoleArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access model\n artifacts and docker image for deployment on ML compute instances or for batch transform\n jobs. Deploying on ML compute instances is part of model hosting. For more information,\n see SageMaker\n Roles.
\nTo be able to pass this role to SageMaker, the caller of this API must have the\n iam:PassRole
permission.
The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access model\n artifacts and docker image for deployment on ML compute instances or for batch transform\n jobs. Deploying on ML compute instances is part of model hosting. For more information,\n see SageMaker\n Roles.
\nTo be able to pass this role to SageMaker, the caller of this API must have the\n iam:PassRole
permission.
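As a hedged sketch of the flow described above (name a model, point at a container image and model artifacts, and pass an execution role that the caller must be allowed to pass via iam:PassRole), using the AWS SDK for Python (Boto 3); the image URI, artifact location, and role ARN are placeholders:

    import boto3

    sagemaker = boto3.client("sagemaker")

    # Create a model from a primary container plus an execution role that SageMaker
    # assumes to pull the image and read the model artifacts.
    sagemaker.create_model(
        ModelName="example-model",
        PrimaryContainer={
            "Image": "123456789012.dkr.ecr.us-east-1.amazonaws.com/example-image:latest",  # placeholder
            "ModelDataUrl": "s3://example-bucket/model/model.tar.gz",  # placeholder
            "Environment": {"EXAMPLE_ENV_VAR": "value"},  # custom environment map
        },
        ExecutionRoleArn="arn:aws:iam::123456789012:role/ExampleSageMakerRole",  # placeholder
    )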
Creates a model package that you can use to create SageMaker models or list on Amazon Web Services\n Marketplace, or a versioned model that is part of a model group. Buyers can subscribe to\n model packages listed on Amazon Web Services Marketplace to create models in SageMaker.
\nTo create a model package by specifying a Docker container that contains your\n inference code and the Amazon S3 location of your model artifacts, provide values for\n InferenceSpecification
. To create a model from an algorithm resource\n that you created or subscribed to in Amazon Web Services Marketplace, provide a value for\n SourceAlgorithmSpecification
.
There are two types of model packages:
\nVersioned - a model that is part of a model group in the model\n registry.
\nUnversioned - a model package that is not part of a model group.
\nCreates a model package that you can use to create SageMaker models or list on Amazon Web Services\n Marketplace, or a versioned model that is part of a model group. Buyers can subscribe to\n model packages listed on Amazon Web Services Marketplace to create models in SageMaker.
\nTo create a model package by specifying a Docker container that contains your\n inference code and the Amazon S3 location of your model artifacts, provide values for\n InferenceSpecification
. To create a model from an algorithm resource\n that you created or subscribed to in Amazon Web Services Marketplace, provide a value for\n SourceAlgorithmSpecification
.
There are two types of model packages:
\nVersioned - a model that is part of a model group in the model\n registry.
\nUnversioned - a model package that is not part of a model group.
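A minimal sketch of the two variants described above (Boto 3; all names, image URIs, and instance types are placeholders):

    import boto3

    sagemaker = boto3.client("sagemaker")

    # Shared inference details: containers, supported instance types, and content formats.
    inference_spec = {
        "Containers": [
            {
                "Image": "123456789012.dkr.ecr.us-east-1.amazonaws.com/example-inference:latest",  # placeholder
                "ModelDataUrl": "s3://example-bucket/model/model.tar.gz",  # placeholder
            }
        ],
        "SupportedContentTypes": ["text/csv"],
        "SupportedResponseMIMETypes": ["text/csv"],
        "SupportedTransformInstanceTypes": ["ml.m5.xlarge"],
        "SupportedRealtimeInferenceInstanceTypes": ["ml.m5.xlarge"],
    }

    # Unversioned model package: identified by ModelPackageName.
    sagemaker.create_model_package(
        ModelPackageName="example-model-package",
        InferenceSpecification=inference_spec,
    )

    # Versioned model package: belongs to a model package group instead.
    sagemaker.create_model_package(
        ModelPackageGroupName="example-model-package-group",
        InferenceSpecification=inference_spec,
        ModelApprovalStatus="PendingManualApproval",
    )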
\nThe name of the model package. The name must have 1 to 63 characters. Valid characters\n are a-z, A-Z, 0-9, and - (hyphen).
\nThis parameter is required for unversioned models. It is not applicable to versioned\n models.
" + "smithy.api#documentation": "The name of the model package. The name must have 1 to 63 characters. Valid characters\n are a-z, A-Z, 0-9, and - (hyphen).
\nThis parameter is required for unversioned models. It is not applicable to versioned\n models.
" } }, "ModelPackageGroupName": { "target": "com.amazonaws.sagemaker#ArnOrName", "traits": { - "smithy.api#documentation": "The name or Amazon Resource Name (ARN) of the model package group that this model version belongs to.
\nThis parameter is required for versioned models, and does not apply to unversioned\n models.
" + "smithy.api#documentation": "The name or Amazon Resource Name (ARN) of the model package group that this model version belongs to.
\nThis parameter is required for versioned models, and does not apply to unversioned\n models.
" } }, "ModelPackageDescription": { @@ -8494,7 +8397,7 @@ "InferenceSpecification": { "target": "com.amazonaws.sagemaker#InferenceSpecification", "traits": { - "smithy.api#documentation": "Specifies details about inference jobs that can be run with models based on this model\n package, including the following:
\nThe Amazon ECR paths of containers that contain the inference code and model\n artifacts.
\nThe instance types that the model package supports for transform jobs and\n real-time endpoints used for inference.
\nThe input and output content formats that the model package supports for\n inference.
\nSpecifies details about inference jobs that can be run with models based on this model\n package, including the following:
\nThe Amazon ECR paths of containers that contain the inference code and model\n artifacts.
\nThe instance types that the model package supports for transform jobs and\n real-time endpoints used for inference.
\nThe input and output content formats that the model package supports for\n inference.
\nWhether to certify the model package for listing on Amazon Web Services Marketplace.
\nThis parameter is optional for unversioned models, and does not apply to versioned\n models.
" + "smithy.api#documentation": "Whether to certify the model package for listing on Amazon Web Services Marketplace.
\nThis parameter is optional for unversioned models, and does not apply to versioned\n models.
" } }, "Tags": { @@ -8525,7 +8428,7 @@ "ModelApprovalStatus": { "target": "com.amazonaws.sagemaker#ModelApprovalStatus", "traits": { - "smithy.api#documentation": "Whether the model is approved for deployment.
\nThis parameter is optional for versioned models, and does not apply to unversioned\n models.
\nFor versioned models, the value of this parameter must be set to Approved
\n to deploy the model.
Whether the model is approved for deployment.
\nThis parameter is optional for versioned models, and does not apply to unversioned\n models.
\nFor versioned models, the value of this parameter must be set to Approved
\n to deploy the model.
Creates an SageMaker notebook instance. A notebook instance is a machine learning (ML)\n compute instance running on a Jupyter notebook.
\nIn a CreateNotebookInstance
request, specify the type of ML compute\n instance that you want to run. SageMaker launches the instance, installs common libraries\n that you can use to explore datasets for model training, and attaches an ML storage\n volume to the notebook instance.
SageMaker also provides a set of example notebooks. Each notebook demonstrates how to\n use SageMaker with a specific algorithm or with a machine learning framework.
\nAfter receiving the request, SageMaker does the following:
\nCreates a network interface in the SageMaker VPC.
\n(Option) If you specified SubnetId
, SageMaker creates a network\n interface in your own VPC, which is inferred from the subnet ID that you provide\n in the input. When creating this network interface, SageMaker attaches the security\n group that you specified in the request to the network interface that it creates\n in your VPC.
Launches an EC2 instance of the type specified in the request in the SageMaker\n VPC. If you specified SubnetId
of your VPC, SageMaker specifies both\n network interfaces when launching this instance. This enables inbound traffic\n from your own VPC to the notebook instance, assuming that the security groups\n allow it.
After creating the notebook instance, SageMaker returns its Amazon Resource Name (ARN).\n You can't change the name of a notebook instance after you create it.
\nAfter SageMaker creates the notebook instance, you can connect to the Jupyter server and\n work in Jupyter notebooks. For example, you can write code to explore a dataset that you\n can use for model training, train a model, host models by creating SageMaker endpoints, and\n validate hosted models.
\nFor more information, see How It Works.
" + "smithy.api#documentation": "Creates an SageMaker notebook instance. A notebook instance is a machine learning (ML)\n compute instance running on a Jupyter notebook.
\nIn a CreateNotebookInstance
request, specify the type of ML compute\n instance that you want to run. SageMaker launches the instance, installs common libraries\n that you can use to explore datasets for model training, and attaches an ML storage\n volume to the notebook instance.
SageMaker also provides a set of example notebooks. Each notebook demonstrates how to\n use SageMaker with a specific algorithm or with a machine learning framework.
\nAfter receiving the request, SageMaker does the following:
\nCreates a network interface in the SageMaker VPC.
\n(Option) If you specified SubnetId
, SageMaker creates a network\n interface in your own VPC, which is inferred from the subnet ID that you provide\n in the input. When creating this network interface, SageMaker attaches the security\n group that you specified in the request to the network interface that it creates\n in your VPC.
Launches an EC2 instance of the type specified in the request in the SageMaker\n VPC. If you specified SubnetId
of your VPC, SageMaker specifies both\n network interfaces when launching this instance. This enables inbound traffic\n from your own VPC to the notebook instance, assuming that the security groups\n allow it.
After creating the notebook instance, SageMaker returns its Amazon Resource Name (ARN).\n You can't change the name of a notebook instance after you create it.
\nAfter SageMaker creates the notebook instance, you can connect to the Jupyter server and\n work in Jupyter notebooks. For example, you can write code to explore a dataset that you\n can use for model training, train a model, host models by creating SageMaker endpoints, and\n validate hosted models.
\nFor more information, see How It Works.
" } }, "com.amazonaws.sagemaker#CreateNotebookInstanceInput": { @@ -8798,7 +8701,7 @@ "RoleArn": { "target": "com.amazonaws.sagemaker#RoleArn", "traits": { - "smithy.api#documentation": "When you send any requests to Amazon Web Services resources from the notebook\n instance, SageMaker assumes this role to perform tasks on your behalf. You must grant this\n role necessary permissions so SageMaker can perform these tasks. The policy must allow the\n SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For\n more information, see SageMaker Roles.
\nTo be able to pass this role to SageMaker, the caller of this API must have the\n iam:PassRole
permission.
When you send any requests to Amazon Web Services resources from the notebook\n instance, SageMaker assumes this role to perform tasks on your behalf. You must grant this\n role necessary permissions so SageMaker can perform these tasks. The policy must allow the\n SageMaker service principal (sagemaker.amazonaws.com) permissions to assume this role. For\n more information, see SageMaker Roles.
\nTo be able to pass this role to SageMaker, the caller of this API must have the\n iam:PassRole
permission.
Sets whether SageMaker provides internet access to the notebook instance. If you set this\n to Disabled
this notebook instance is able to access resources only in your\n VPC, and is not be able to connect to SageMaker training and endpoint services unless you\n configure a NAT Gateway in your VPC.
For more information, see Notebook Instances Are Internet-Enabled by Default. You can set the value\n of this parameter to Disabled
only if you set a value for the\n SubnetId
parameter.
Sets whether SageMaker provides internet access to the notebook instance. If you set this\n to Disabled
this notebook instance is able to access resources only in your\n VPC, and is not be able to connect to SageMaker training and endpoint services unless you\n configure a NAT Gateway in your VPC.
For more information, see Notebook Instances Are Internet-Enabled by Default. You can set the value\n of this parameter to Disabled
only if you set a value for the\n SubnetId
parameter.
Whether root access is enabled or disabled for users of the notebook instance. The\n default value is Enabled
.
Lifecycle configurations need root access to be able to set up a notebook\n instance. Because of this, lifecycle configurations associated with a notebook\n instance always run with root access even if you disable root access for\n users.
\nWhether root access is enabled or disabled for users of the notebook instance. The\n default value is Enabled
.
Lifecycle configurations need root access to be able to set up a notebook\n instance. Because of this, lifecycle configurations associated with a notebook\n instance always run with root access even if you disable root access for\n users.
\nCreates a lifecycle configuration that you can associate with a notebook instance. A\n lifecycle configuration is a collection of shell scripts that\n run when you create or start a notebook instance.
\nEach lifecycle configuration script has a limit of 16384 characters.
\nThe value of the $PATH
environment variable that is available to both\n scripts is /sbin:bin:/usr/sbin:/usr/bin
.
View CloudWatch Logs for notebook instance lifecycle configurations in log group\n /aws/sagemaker/NotebookInstances
in log stream\n [notebook-instance-name]/[LifecycleConfigHook]
.
Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs\n for longer than 5 minutes, it fails and the notebook instance is not created or\n started.
\nFor information about notebook instance lifestyle configurations, see Step\n 2.1: (Optional) Customize a Notebook Instance.
" + "smithy.api#documentation": "Creates a lifecycle configuration that you can associate with a notebook instance. A\n lifecycle configuration is a collection of shell scripts that\n run when you create or start a notebook instance.
\nEach lifecycle configuration script has a limit of 16384 characters.
\nThe value of the $PATH
environment variable that is available to both\n scripts is /sbin:bin:/usr/sbin:/usr/bin
.
View CloudWatch Logs for notebook instance lifecycle configurations in log group\n /aws/sagemaker/NotebookInstances
in log stream\n [notebook-instance-name]/[LifecycleConfigHook]
.
Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs\n for longer than 5 minutes, it fails and the notebook instance is not created or\n started.
\nFor information about notebook instance lifestyle configurations, see Step\n 2.1: (Optional) Customize a Notebook Instance.
" } }, "com.amazonaws.sagemaker#CreateNotebookInstanceLifecycleConfigInput": { @@ -9101,7 +9004,7 @@ "target": "com.amazonaws.sagemaker#CreatePresignedNotebookInstanceUrlOutput" }, "traits": { - "smithy.api#documentation": "Returns a URL that you can use to connect to the Jupyter server from a notebook\n instance. In the SageMaker console, when you choose Open
next to a notebook\n instance, SageMaker opens a new tab showing the Jupyter server home page from the notebook\n instance. The console uses this API to get the URL and show the page.
The IAM role or user used to call this API defines the permissions to access the\n notebook instance. Once the presigned URL is created, no additional permission is\n required to access this URL. IAM authorization policies for this API are also enforced\n for every HTTP request and WebSocket frame that attempts to connect to the notebook\n instance.
\nYou can restrict access to this API and to the URL that it returns to a list of IP\n addresses that you specify. Use the NotIpAddress
condition operator and the\n aws:SourceIP
condition context key to specify the list of IP addresses\n that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address.
The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If\n you try to use the URL after the 5-minute limit expires, you are directed to the\n Amazon Web Services console sign-in page.
\nReturns a URL that you can use to connect to the Jupyter server from a notebook\n instance. In the SageMaker console, when you choose Open
next to a notebook\n instance, SageMaker opens a new tab showing the Jupyter server home page from the notebook\n instance. The console uses this API to get the URL and show the page.
The IAM role or user used to call this API defines the permissions to access the\n notebook instance. Once the presigned URL is created, no additional permission is\n required to access this URL. IAM authorization policies for this API are also enforced\n for every HTTP request and WebSocket frame that attempts to connect to the notebook\n instance.
\nYou can restrict access to this API and to the URL that it returns to a list of IP\n addresses that you specify. Use the NotIpAddress
condition operator and the\n aws:SourceIP
condition context key to specify the list of IP addresses\n that you want to have access to the notebook instance. For more information, see Limit Access to a Notebook Instance by IP Address.
The URL that you get from a call to CreatePresignedNotebookInstanceUrl is valid only for 5 minutes. If\n you try to use the URL after the 5-minute limit expires, you are directed to the\n Amazon Web Services console sign-in page.
\nStarts a model training job. After training completes, SageMaker saves the resulting\n model artifacts to an Amazon S3 location that you specify.
\nIf you choose to host your model using SageMaker hosting services, you can use the\n resulting model artifacts as part of the model. You can also use the artifacts in a\n machine learning service other than SageMaker, provided that you know how to use them for\n inference. \n
\nIn the request body, you provide the following:
\n\n AlgorithmSpecification
- Identifies the training algorithm to\n use.\n
\n HyperParameters
- Specify these algorithm-specific parameters to\n enable the estimation of model parameters during training. Hyperparameters can\n be tuned to optimize this learning process. For a list of hyperparameters for\n each training algorithm provided by SageMaker, see Algorithms.
Do not include any security-sensitive information including account access\n IDs, secrets or tokens in any hyperparameter field. If the use of\n security-sensitive credentials are detected, SageMaker will reject your training\n job request and return an exception error.
\n\n InputDataConfig
- Describes the input required by the training\n job and the Amazon S3, EFS, or FSx location where it is stored.
\n OutputDataConfig
- Identifies the Amazon S3 bucket where you want\n SageMaker to save the results of model training.
\n ResourceConfig
- Identifies the resources, ML compute\n instances, and ML storage volumes to deploy for model training. In distributed\n training, you specify more than one instance.
\n EnableManagedSpotTraining
- Optimize the cost of training machine\n learning models by up to 80% by using Amazon EC2 Spot instances. For more\n information, see Managed Spot\n Training.
\n RoleArn
- The Amazon Resource Name (ARN) that SageMaker assumes to perform tasks on\n your behalf during model training.\n \n You must grant this role the necessary permissions so that SageMaker can successfully\n complete model training.
\n StoppingCondition
- To help cap training costs, use\n MaxRuntimeInSeconds
to set a time limit for training. Use\n MaxWaitTimeInSeconds
to specify how long a managed spot\n training job has to complete.
\n Environment
- The environment variables to set in the Docker\n container.
\n RetryStrategy
- The number of times to retry the job when the job\n fails due to an InternalServerError
.
For more information about SageMaker, see How It Works.
" + "smithy.api#documentation": "Starts a model training job. After training completes, SageMaker saves the resulting\n model artifacts to an Amazon S3 location that you specify.
\nIf you choose to host your model using SageMaker hosting services, you can use the\n resulting model artifacts as part of the model. You can also use the artifacts in a\n machine learning service other than SageMaker, provided that you know how to use them for\n inference. \n
\nIn the request body, you provide the following:
\n\n AlgorithmSpecification
- Identifies the training algorithm to\n use.\n
\n HyperParameters
- Specify these algorithm-specific parameters to\n enable the estimation of model parameters during training. Hyperparameters can\n be tuned to optimize this learning process. For a list of hyperparameters for\n each training algorithm provided by SageMaker, see Algorithms.
Do not include any security-sensitive information including account access\n IDs, secrets or tokens in any hyperparameter field. If the use of\n security-sensitive credentials are detected, SageMaker will reject your training\n job request and return an exception error.
\n\n InputDataConfig
- Describes the input required by the training job and the Amazon S3,\n EFS, or FSx location where it is stored.
\n OutputDataConfig
- Identifies the Amazon S3 bucket where you want\n SageMaker to save the results of model training.
\n ResourceConfig
- Identifies the resources, ML compute\n instances, and ML storage volumes to deploy for model training. In distributed\n training, you specify more than one instance.
\n EnableManagedSpotTraining
- Optimize the cost of training machine\n learning models by up to 80% by using Amazon EC2 Spot instances. For more\n information, see Managed Spot\n Training.
\n RoleArn
- The Amazon Resource Name (ARN) that SageMaker assumes to perform tasks on\n your behalf during model training.\n \n You must grant this role the necessary permissions so that SageMaker can successfully\n complete model training.
\n StoppingCondition
- To help cap training costs, use\n MaxRuntimeInSeconds
to set a time limit for training. Use\n MaxWaitTimeInSeconds
to specify how long a managed spot\n training job has to complete.
\n Environment
- The environment variables to set in the Docker\n container.
\n RetryStrategy
- The number of times to retry the job when the job\n fails due to an InternalServerError
.
For more information about SageMaker, see How It Works.
" } }, "com.amazonaws.sagemaker#CreateTrainingJobRequest": { @@ -9465,7 +9368,7 @@ "HyperParameters": { "target": "com.amazonaws.sagemaker#HyperParameters", "traits": { - "smithy.api#documentation": "Algorithm-specific parameters that influence the quality of the model. You set\n hyperparameters before you start the learning process. For a list of hyperparameters for\n each training algorithm provided by SageMaker, see Algorithms.
\nYou can specify a maximum of 100 hyperparameters. Each hyperparameter is a\n key-value pair. Each key and value is limited to 256 characters, as specified by the\n Length Constraint
.
Do not include any security-sensitive information including account access IDs,\n secrets or tokens in any hyperparameter field. If the use of security-sensitive\n credentials are detected, SageMaker will reject your training job request and return an\n exception error.
\nAlgorithm-specific parameters that influence the quality of the model. You set\n hyperparameters before you start the learning process. For a list of hyperparameters for\n each training algorithm provided by SageMaker, see Algorithms.
\nYou can specify a maximum of 100 hyperparameters. Each hyperparameter is a\n key-value pair. Each key and value is limited to 256 characters, as specified by the\n Length Constraint
.
Do not include any security-sensitive information including account access IDs,\n secrets or tokens in any hyperparameter field. If the use of security-sensitive\n credentials are detected, SageMaker will reject your training job request and return an\n exception error.
\nThe Amazon Resource Name (ARN) of an IAM role that SageMaker can assume to perform\n tasks on your behalf.
\nDuring model training, SageMaker needs your permission to read input data from an S3\n bucket, download a Docker image that contains training code, write model artifacts to an\n S3 bucket, write logs to Amazon CloudWatch Logs, and publish metrics to Amazon CloudWatch. You grant\n permissions for all of these tasks to an IAM role. For more information, see SageMaker\n Roles.
\nTo be able to pass this role to SageMaker, the caller of this API must have the\n iam:PassRole
permission.
The Amazon Resource Name (ARN) of an IAM role that SageMaker can assume to perform\n tasks on your behalf.
\nDuring model training, SageMaker needs your permission to read input data from an S3\n bucket, download a Docker image that contains training code, write model artifacts to an\n S3 bucket, write logs to Amazon CloudWatch Logs, and publish metrics to Amazon CloudWatch. You grant\n permissions for all of these tasks to an IAM role. For more information, see SageMaker\n Roles.
\nTo be able to pass this role to SageMaker, the caller of this API must have the\n iam:PassRole
permission.
An array of Channel
objects. Each channel is a named input source.\n InputDataConfig
describes the input data and its location.
Algorithms can accept input data from one or more channels. For example, an\n algorithm might have two channels of input data, training_data
and\n validation_data
. The configuration for each channel provides the S3,\n EFS, or FSx location where the input data is stored. It also provides information about\n the stored data: the MIME type, compression method, and whether the data is wrapped in\n RecordIO format.
Depending on the input mode that the algorithm supports, SageMaker either copies input\n data files from an S3 bucket to a local directory in the Docker container, or makes it\n available as input streams. For example, if you specify an EFS location, input data\n files are available as input streams. They do not need to be downloaded.
" + "smithy.api#documentation": "An array of Channel
objects. Each channel is a named input source.\n InputDataConfig
describes the input data and its location.
Algorithms can accept input data from one or more channels. For example, an\n algorithm might have two channels of input data, training_data
and\n validation_data
. The configuration for each channel provides the S3,\n EFS, or FSx location where the input data is stored. It also provides information about\n the stored data: the MIME type, compression method, and whether the data is wrapped in\n RecordIO format.
Depending on the input mode that the algorithm supports, SageMaker either copies input\n data files from an S3 bucket to a local directory in the Docker container, or makes it\n available as input streams. For example, if you specify an EFS location, input data\n files are available as input streams. They do not need to be downloaded.
" } }, "OutputDataConfig": { @@ -9498,7 +9401,7 @@ "ResourceConfig": { "target": "com.amazonaws.sagemaker#ResourceConfig", "traits": { - "smithy.api#documentation": "The resources, including the ML compute instances and ML storage volumes, to use\n for model training.
\nML storage volumes store model artifacts and incremental states. Training\n algorithms might also use ML storage volumes for scratch space. If you want SageMaker to use\n the ML storage volume to store the training data, choose File
as the\n TrainingInputMode
in the algorithm specification. For distributed\n training algorithms, specify an instance count greater than 1.
The resources, including the ML compute instances and ML storage volumes, to use\n for model training.
\nML storage volumes store model artifacts and incremental states. Training\n algorithms might also use ML storage volumes for scratch space. If you want SageMaker to use\n the ML storage volume to store the training data, choose File
as the\n TrainingInputMode
in the algorithm specification. For distributed\n training algorithms, specify an instance count greater than 1.
Specifies a limit to how long a model training job can run. It also specifies how long\n a managed Spot training job has to complete. When the job reaches the time limit, SageMaker\n ends the training job. Use this API to cap model training costs.
\nTo stop a job, SageMaker sends the algorithm the SIGTERM
signal, which delays\n job termination for 120 seconds. Algorithms can use this 120-second window to save the\n model artifacts, so the results of training are not lost.
Specifies a limit to how long a model training job can run. It also specifies how long\n a managed Spot training job has to complete. When the job reaches the time limit, SageMaker\n ends the training job. Use this API to cap model training costs.
\nTo stop a job, SageMaker sends the algorithm the SIGTERM
signal, which delays\n job termination for 120 seconds. Algorithms can use this 120-second window to save the\n model artifacts, so the results of training are not lost.
To train models using managed spot training, choose True
. Managed spot\n training provides a fully managed and scalable infrastructure for training machine\n learning models. this option is useful when training jobs can be interrupted and when\n there is flexibility when the training job is run.
The complete and intermediate results of jobs are stored in an Amazon S3 bucket, and can be\n used as a starting point to train models incrementally. Amazon SageMaker provides metrics and\n logs in CloudWatch. They can be used to see when managed spot training jobs are running,\n interrupted, resumed, or completed.
" + "smithy.api#documentation": "To train models using managed spot training, choose True
. Managed spot\n training provides a fully managed and scalable infrastructure for training machine\n learning models. this option is useful when training jobs can be interrupted and when\n there is flexibility when the training job is run.
The complete and intermediate results of jobs are stored in an Amazon S3 bucket, and can be\n used as a starting point to train models incrementally. Amazon SageMaker provides metrics and\n logs in CloudWatch. They can be used to see when managed spot training jobs are running,\n interrupted, resumed, or completed.
" } }, "CheckpointConfig": { @@ -9618,7 +9521,7 @@ } ], "traits": { - "smithy.api#documentation": "Starts a transform job. A transform job uses a trained model to get inferences on a\n dataset and saves these results to an Amazon S3 location that you specify.
\nTo perform batch transformations, you create a transform job and use the data that you\n have readily available.
\nIn the request body, you provide the following:
\n\n TransformJobName
- Identifies the transform job. The name must be\n unique within an Amazon Web Services Region in an Amazon Web Services account.
\n ModelName
- Identifies the model to use. ModelName
\n must be the name of an existing Amazon SageMaker model in the same Amazon Web Services Region and Amazon Web Services\n\t\t account. For information on creating a model, see CreateModel.
\n TransformInput
- Describes the dataset to be transformed and the\n Amazon S3 location where it is stored.
\n TransformOutput
- Identifies the Amazon S3 location where you want\n Amazon SageMaker to save the results from the transform job.
\n TransformResources
- Identifies the ML compute instances for the\n transform job.
For more information about how batch transformation works, see Batch\n Transform.
" + "smithy.api#documentation": "Starts a transform job. A transform job uses a trained model to get inferences on a\n dataset and saves these results to an Amazon S3 location that you specify.
\nTo perform batch transformations, you create a transform job and use the data that you\n have readily available.
\nIn the request body, you provide the following:
\n\n TransformJobName
- Identifies the transform job. The name must be\n unique within an Amazon Web Services Region in an Amazon Web Services account.
\n ModelName
- Identifies the model to use. ModelName
\n must be the name of an existing Amazon SageMaker model in the same Amazon Web Services Region and Amazon Web Services\n\t\t account. For information on creating a model, see CreateModel.
\n TransformInput
- Describes the dataset to be transformed and the\n Amazon S3 location where it is stored.
\n TransformOutput
- Identifies the Amazon S3 location where you want\n Amazon SageMaker to save the results from the transform job.
\n TransformResources
- Identifies the ML compute instances for the\n transform job.
For more information about how batch transformation works, see Batch\n Transform.
" } }, "com.amazonaws.sagemaker#CreateTransformJobRequest": { @@ -9653,13 +9556,13 @@ "MaxPayloadInMB": { "target": "com.amazonaws.sagemaker#MaxPayloadInMB", "traits": { - "smithy.api#documentation": "The maximum allowed size of the payload, in MB. A payload is the\n data portion of a record (without metadata). The value in MaxPayloadInMB
\n must be greater than, or equal to, the size of a single record. To estimate the size of\n a record in MB, divide the size of your dataset by the number of records. To ensure that\n the records fit within the maximum payload size, we recommend using a slightly larger\n value. The default value is 6
MB.\n
The value of MaxPayloadInMB
cannot be greater than 100 MB. If you specify\n the MaxConcurrentTransforms
parameter, the value of\n (MaxConcurrentTransforms * MaxPayloadInMB)
also cannot exceed 100\n MB.
For cases where the payload might be arbitrarily large and is transmitted using HTTP\n chunked encoding, set the value to 0
.\n This\n feature works only in supported algorithms. Currently, Amazon SageMaker built-in\n algorithms do not support HTTP chunked encoding.
The maximum allowed size of the payload, in MB. A payload is the\n data portion of a record (without metadata). The value in MaxPayloadInMB
\n must be greater than, or equal to, the size of a single record. To estimate the size of\n a record in MB, divide the size of your dataset by the number of records. To ensure that\n the records fit within the maximum payload size, we recommend using a slightly larger\n value. The default value is 6
MB.\n
The value of MaxPayloadInMB
cannot be greater than 100 MB. If you specify\n the MaxConcurrentTransforms
parameter, the value of\n (MaxConcurrentTransforms * MaxPayloadInMB)
also cannot exceed 100\n MB.
For cases where the payload might be arbitrarily large and is transmitted using HTTP\n chunked encoding, set the value to 0
.\n This\n feature works only in supported algorithms. Currently, Amazon SageMaker built-in\n algorithms do not support HTTP chunked encoding.
Specifies the number of records to include in a mini-batch for an HTTP inference\n request. A record\n is a single unit of input data that\n inference can be made on. For example, a single line in a CSV file is a record.
\nTo enable the batch strategy, you must set the SplitType
property to\n Line
, RecordIO
, or TFRecord
.
To use only one record when making an HTTP invocation request to a container, set\n BatchStrategy
to SingleRecord
and SplitType
\n to Line
.
To fit as many records in a mini-batch as can fit within the\n MaxPayloadInMB
limit, set BatchStrategy
to\n MultiRecord
and SplitType
to Line
.
Specifies the number of records to include in a mini-batch for an HTTP inference\n request. A record\n is a single unit of input data that\n inference can be made on. For example, a single line in a CSV file is a record.
\nTo enable the batch strategy, you must set the SplitType
property to\n Line
, RecordIO
, or TFRecord
.
To use only one record when making an HTTP invocation request to a container, set\n BatchStrategy
to SingleRecord
and SplitType
\n to Line
.
To fit as many records in a mini-batch as can fit within the\n MaxPayloadInMB
limit, set BatchStrategy
to\n MultiRecord
and SplitType
to Line
.
Use this operation to create a workforce. This operation will return an error\n if a workforce already exists in the Amazon Web Services Region that you specify. You can only\n create one workforce in each Amazon Web Services Region per Amazon Web Services account.
\nIf you want to create a new workforce in an Amazon Web Services Region where \n a workforce already exists, use the API\n operation to delete the existing workforce and then use CreateWorkforce
\n to create a new workforce.
To create a private workforce using Amazon Cognito, you must specify a Cognito user pool\n in CognitoConfig
.\n You can also create an Amazon Cognito workforce using the Amazon SageMaker console. \n For more information, see \n \n Create a Private Workforce (Amazon Cognito).
To create a private workforce using your own OIDC Identity Provider (IdP), specify your IdP\n configuration in OidcConfig
. Your OIDC IdP must support groups\n because groups are used by Ground Truth and Amazon A2I to create work teams. \n For more information, see \n Create a Private Workforce (OIDC IdP).
Use this operation to create a workforce. This operation will return an error\n if a workforce already exists in the Amazon Web Services Region that you specify. You can only\n create one workforce in each Amazon Web Services Region per Amazon Web Services account.
\n\nIf you want to create a new workforce in an Amazon Web Services Region where \n a workforce already exists, use the API\n operation to delete the existing workforce and then use CreateWorkforce
\n to create a new workforce.
To create a private workforce using Amazon Cognito, you must specify a Cognito user pool\n in CognitoConfig
.\n You can also create an Amazon Cognito workforce using the Amazon SageMaker console. \n For more information, see \n \n Create a Private Workforce (Amazon Cognito).
To create a private workforce using your own OIDC Identity Provider (IdP), specify your IdP\n configuration in OidcConfig
. Your OIDC IdP must support groups\n because groups are used by Ground Truth and Amazon A2I to create work teams. \n For more information, see \n Create a Private Workforce (OIDC IdP).
Use this parameter to configure an Amazon Cognito private workforce.\n A single Cognito workforce is created using and corresponds to a single\n \n Amazon Cognito user pool.
\nDo not use OidcConfig
if you specify values for \n CognitoConfig
.
Use this parameter to configure an Amazon Cognito private workforce.\n A single Cognito workforce is created using and corresponds to a single\n \n Amazon Cognito user pool.
\n \nDo not use OidcConfig
if you specify values for \n CognitoConfig
.
Creates a new work team for labeling your data. A work team is defined by one or more\n Amazon Cognito user pools. You must first create the user pools before you can create a work\n team.
\nYou cannot create more than 25 work teams in an account and region.
" + "smithy.api#documentation": "Creates a new work team for labeling your data. A work team is defined by one or more\n Amazon Cognito user pools. You must first create the user pools before you can create a work\n team.
\nYou cannot create more than 25 work teams in an account and region.
" } }, "com.amazonaws.sagemaker#CreateWorkteamRequest": { @@ -10056,7 +9959,7 @@ "MemberDefinitions": { "target": "com.amazonaws.sagemaker#MemberDefinitions", "traits": { - "smithy.api#documentation": "A list of MemberDefinition
objects that contains objects that identify\n the workers that make up the work team.
Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). For\n private workforces created using Amazon Cognito use CognitoMemberDefinition
. For\n workforces created using your own OIDC identity provider (IdP) use\n OidcMemberDefinition
. Do not provide input for both of these parameters\n in a single request.
For workforces created using Amazon Cognito, private work teams correspond to Amazon Cognito\n user groups within the user pool used to create a workforce. All of the\n CognitoMemberDefinition
objects that make up the member definition must\n have the same ClientId
and UserPool
values. To add a Amazon\n Cognito user group to an existing worker pool, see Adding groups to a User\n Pool. For more information about user pools, see Amazon Cognito User\n Pools.
For workforces created using your own OIDC IdP, specify the user groups that you want to \n include in your private work team in OidcMemberDefinition
by listing those groups\n in Groups
.
A list of MemberDefinition
objects that contains objects that identify\n the workers that make up the work team.
Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). For\n private workforces created using Amazon Cognito use CognitoMemberDefinition
. For\n workforces created using your own OIDC identity provider (IdP) use\n OidcMemberDefinition
. Do not provide input for both of these parameters\n in a single request.
For workforces created using Amazon Cognito, private work teams correspond to Amazon Cognito\n user groups within the user pool used to create a workforce. All of the\n CognitoMemberDefinition
objects that make up the member definition must\n have the same ClientId
and UserPool
values. To add a Amazon\n Cognito user group to an existing worker pool, see Adding groups to a User\n Pool. For more information about user pools, see Amazon Cognito User\n Pools.
For workforces created using your own OIDC IdP, specify the user groups that you want to \n include in your private work team in OidcMemberDefinition
by listing those groups\n in Groups
.
An array of key-value pairs.
\nFor more information, see Resource\n Tag and Using\n Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User\n Guide.
" + "smithy.api#documentation": "An array of key-value pairs.
\nFor more information, see Resource\n Tag and Using\n Cost Allocation Tags in the Amazon Web Services Billing and Cost Management User\n Guide.
" } } } @@ -10225,7 +10128,7 @@ "KmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that SageMaker uses to encrypt the\n captured data at rest using Amazon S3 server-side encryption.
\nThe KmsKeyId can be any of the following formats:
\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN:\n arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
Alias name: alias/ExampleAlias
\n
Alias name ARN:\n arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias
\n
The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that SageMaker uses to encrypt data on\n the storage volume attached to the ML compute instance that hosts the endpoint.
\nThe KmsKeyId can be any of the following formats:
\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN:\n arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
Alias name: alias/ExampleAlias
\n
Alias name ARN:\n arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias
\n
A JSONPath expression used to select a portion of the input data to pass to\n the algorithm. Use the InputFilter
parameter to exclude fields, such as an\n ID column, from the input. If you want SageMaker to pass the entire input dataset to the\n algorithm, accept the default value $
.
Examples: \"$\"
, \"$[1:]\"
, \"$.features\"
\n
A JSONPath expression used to select a portion of the input data to pass to\n the algorithm. Use the InputFilter
parameter to exclude fields, such as an\n ID column, from the input. If you want SageMaker to pass the entire input dataset to the\n algorithm, accept the default value $
.
Examples: \"$\"
, \"$[1:]\"
, \"$.features\"
\n
A JSONPath expression used to select a portion of the joined dataset to save\n in the output file for a batch transform job. If you want SageMaker to store the entire input\n dataset in the output file, leave the default value, $
. If you specify\n indexes that aren't within the dimension size of the joined dataset, you get an\n error.
Examples: \"$\"
, \"$[0,5:]\"
,\n \"$['id','SageMakerOutput']\"
\n
A JSONPath expression used to select a portion of the joined dataset to save\n in the output file for a batch transform job. If you want SageMaker to store the entire input\n dataset in the output file, leave the default value, $
. If you specify\n indexes that aren't within the dimension size of the joined dataset, you get an\n error.
Examples: \"$\"
, \"$[0,5:]\"
,\n \"$['id','SageMakerOutput']\"
\n
Specifies the source of the data to join with the transformed data. The valid values\n are None
and Input
. The default value is None
,\n which specifies not to join the input with the transformed data. If you want the batch\n transform job to join the original input data with the transformed data, set\n JoinSource
to Input
. You can specify\n OutputFilter
as an additional filter to select a portion of the joined\n dataset and store it in the output file.
For JSON or JSONLines objects, such as a JSON array, SageMaker adds the transformed data to\n the input JSON object in an attribute called SageMakerOutput
. The joined\n result for JSON must be a key-value pair object. If the input is not a key-value pair\n object, SageMaker creates a new JSON file. In the new JSON file, and the input data is stored\n under the SageMakerInput
key and the results are stored in\n SageMakerOutput
.
For CSV data, SageMaker takes each row as a JSON array and joins the transformed data with\n the input by appending each transformed row to the end of the input. The joined data has\n the original input data followed by the transformed data and the output is a CSV\n file.
\nFor information on how joining in applied, see Workflow for Associating Inferences with Input Records.
" + "smithy.api#documentation": "Specifies the source of the data to join with the transformed data. The valid values\n are None
and Input
. The default value is None
,\n which specifies not to join the input with the transformed data. If you want the batch\n transform job to join the original input data with the transformed data, set\n JoinSource
to Input
. You can specify\n OutputFilter
as an additional filter to select a portion of the joined\n dataset and store it in the output file.
For JSON or JSONLines objects, such as a JSON array, SageMaker adds the transformed data to\n the input JSON object in an attribute called SageMakerOutput
. The joined\n result for JSON must be a key-value pair object. If the input is not a key-value pair\n object, SageMaker creates a new JSON file. In the new JSON file, and the input data is stored\n under the SageMakerInput
key and the results are stored in\n SageMakerOutput
.
For CSV data, SageMaker takes each row as a JSON array and joins the transformed data with\n the input by appending each transformed row to the end of the input. The joined data has\n the original input data followed by the transformed data and the output is a CSV\n file.
\nFor information on how joining in applied, see Workflow for Associating Inferences with Input Records.
" } } }, @@ -10844,7 +10747,7 @@ "UserProfileName": { "target": "com.amazonaws.sagemaker#UserProfileName", "traits": { - "smithy.api#documentation": "The user profile name. If this value is not set, then SpaceName
must be set.
The user profile name.
" } }, "AppType": { @@ -10864,7 +10767,7 @@ "SpaceName": { "target": "com.amazonaws.sagemaker#SpaceName", "traits": { - "smithy.api#documentation": "The name of the space. If this value is not set, then UserProfileName
must be set.
The name of the space.
" } } } @@ -11201,7 +11104,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": "Deletes an endpoint. SageMaker frees up all of the resources that were deployed when the\n endpoint was created.
\nSageMaker retires any custom KMS key grants associated with the endpoint, meaning you don't\n need to use the RevokeGrant API call.
\nWhen you delete your endpoint, SageMaker asynchronously deletes associated endpoint\n resources such as KMS key grants. You might still see these resources in your account\n for a few minutes after deleting your endpoint. Do not delete or revoke the permissions\n for your \n ExecutionRoleArn\n
, otherwise SageMaker cannot delete these\n resources.
Deletes an endpoint. SageMaker frees up all of the resources that were deployed when the\n endpoint was created.
\nSageMaker retires any custom KMS key grants associated with the endpoint, meaning you don't\n need to use the RevokeGrant API call.
\nWhen you delete your endpoint, SageMaker asynchronously deletes associated endpoint\n resources such as KMS key grants. You might still see these resources in your account\n for a few minutes after deleting your endpoint. Do not delete or revoke the permissions\n for your \n ExecutionRoleArn\n
, otherwise SageMaker cannot delete these\n resources.
Deletes an endpoint configuration. The DeleteEndpointConfig
API\n deletes only the specified configuration. It does not delete endpoints created using the\n configuration.
You must not delete an EndpointConfig
in use by an endpoint that is\n live or while the UpdateEndpoint
or CreateEndpoint
operations\n are being performed on the endpoint. If you delete the EndpointConfig
of an\n endpoint that is active or being created or updated you may lose visibility into the\n instance type the endpoint is using. The endpoint must be deleted in order to stop\n incurring charges.
Deletes an endpoint configuration. The DeleteEndpointConfig
API\n deletes only the specified configuration. It does not delete endpoints created using the\n configuration.
You must not delete an EndpointConfig
in use by an endpoint that is\n live or while the UpdateEndpoint
or CreateEndpoint
operations\n are being performed on the endpoint. If you delete the EndpointConfig
of an\n endpoint that is active or being created or updated you may lose visibility into the\n instance type the endpoint is using. The endpoint must be deleted in order to stop\n incurring charges.
The name of the image to delete.
", + "smithy.api#documentation": "The name of the image.
", "smithy.api#required": {} } }, "Version": { "target": "com.amazonaws.sagemaker#ImageVersionNumber", "traits": { - "smithy.api#documentation": "The version to delete.
" - } - }, - "Alias": { - "target": "com.amazonaws.sagemaker#SageMakerImageVersionAlias", - "traits": { - "smithy.api#documentation": "The alias of the image to delete.
" + "smithy.api#documentation": "The version to delete.
", + "smithy.api#required": {} } } } @@ -11564,7 +11462,7 @@ } ], "traits": { - "smithy.api#documentation": "Deletes an inference experiment.
\n\n This operation does not delete your endpoint, variants, or any underlying resources. This operation only\n deletes the metadata of your experiment.\n
\nDeletes an inference experiment.
\n\n This operation does not delete your endpoint, variants, or any underlying resources. This operation only\n deletes the metadata of your experiment.\n
\nDeletes a model package.
\nA model package is used to create SageMaker models or list on Amazon Web Services Marketplace. Buyers can\n subscribe to model packages listed on Amazon Web Services Marketplace to create models in SageMaker.
" + "smithy.api#documentation": "Deletes a model package.
\nA model package is used to create SageMaker models or list on Amazon Web Services Marketplace. Buyers can\n subscribe to model packages listed on Amazon Web Services Marketplace to create models in SageMaker.
" } }, "com.amazonaws.sagemaker#DeleteModelPackageGroup": { @@ -11781,7 +11679,7 @@ "ModelPackageName": { "target": "com.amazonaws.sagemaker#VersionedArnOrName", "traits": { - "smithy.api#documentation": "The name or Amazon Resource Name (ARN) of the model package to delete.
\nWhen you specify a name, the name must have 1 to 63 characters. Valid\n characters are a-z, A-Z, 0-9, and - (hyphen).
", + "smithy.api#documentation": "The name or Amazon Resource Name (ARN) of the model package to delete.
\nWhen you specify a name, the name must have 1 to 63 characters. Valid\n characters are a-z, A-Z, 0-9, and - (hyphen).
", "smithy.api#required": {} } } @@ -11854,7 +11752,7 @@ "target": "smithy.api#Unit" }, "traits": { - "smithy.api#documentation": " Deletes an SageMaker notebook instance. Before you can delete a notebook instance, you\n must call the StopNotebookInstance
API.
When you delete a notebook instance, you lose all of your data. SageMaker removes\n the ML compute instance, and deletes the ML storage volume and the network interface\n associated with the notebook instance.
\n Deletes an SageMaker notebook instance. Before you can delete a notebook instance, you\n must call the StopNotebookInstance
API.
When you delete a notebook instance, you lose all of your data. SageMaker removes\n the ML compute instance, and deletes the ML storage volume and the network interface\n associated with the notebook instance.
\nDeletes the specified tags from an SageMaker resource.
\nTo list a resource's tags, use the ListTags
API.
When you call this API to delete tags from a hyperparameter tuning job, the\n deleted tags are not removed from training jobs that the hyperparameter tuning job\n launched before you called this API.
\nWhen you call this API to delete tags from a SageMaker Studio Domain or User\n Profile, the deleted tags are not removed from Apps that the SageMaker Studio Domain\n or User Profile launched before you called this API.
\nDeletes the specified tags from an SageMaker resource.
\nTo list a resource's tags, use the ListTags
API.
When you call this API to delete tags from a hyperparameter tuning job, the\n deleted tags are not removed from training jobs that the hyperparameter tuning job\n launched before you called this API.
\nWhen you call this API to delete tags from a SageMaker Studio Domain or User\n Profile, the deleted tags are not removed from Apps that the SageMaker Studio Domain\n or User Profile launched before you called this API.
\nUse this operation to delete a workforce.
\nIf you want to create a new workforce in an Amazon Web Services Region where\n a workforce already exists, use this operation to delete the \n existing workforce and then use \n to create a new workforce.
\nIf a private workforce contains one or more work teams, you must use \n the \n operation to delete all work teams before you delete the workforce.\n If you try to delete a workforce that contains one or more work teams,\n you will receive a ResourceInUse
error.
Use this operation to delete a workforce.
\n\nIf you want to create a new workforce in an Amazon Web Services Region where\n a workforce already exists, use this operation to delete the \n existing workforce and then use \n to create a new workforce.
\nIf a private workforce contains one or more work teams, you must use \n the \n operation to delete all work teams before you delete the workforce.\n If you try to delete a workforce that contains one or more work teams,\n you will receive a ResourceInUse
error.
Gets the Amazon EC2 Container Registry path of the docker image of the model that is hosted in this ProductionVariant.
\nIf you used the registry/repository[:tag]
form to specify the image path\n of the primary container when you created the model hosted in this\n ProductionVariant
, the path resolves to a path of the form\n registry/repository[@digest]
. A digest is a hash value that identifies\n a specific version of an image. For information about Amazon ECR paths, see Pulling an Image in the Amazon ECR User Guide.
Gets the Amazon EC2 Container Registry path of the docker image of the model that is hosted in this ProductionVariant.
\nIf you used the registry/repository[:tag]
form to specify the image path\n of the primary container when you created the model hosted in this\n ProductionVariant
, the path resolves to a path of the form\n registry/repository[@digest]
. A digest is a hash value that identifies\n a specific version of an image. For information about Amazon ECR paths, see Pulling an Image in the Amazon ECR User Guide.
The user profile name. If this value is not set, then SpaceName
must be set.
The user profile name.
" } }, "AppType": { @@ -12850,7 +12748,7 @@ "SpaceName": { "target": "com.amazonaws.sagemaker#SpaceName", "traits": { - "smithy.api#documentation": "The name of the space. If this value is not set, then UserProfileName
must be set.
The name of the space.
" } } } @@ -13066,7 +12964,7 @@ "BestCandidate": { "target": "com.amazonaws.sagemaker#AutoMLCandidate", "traits": { - "smithy.api#documentation": "The best model candidate selected by SageMaker Autopilot using both the best objective metric and\n lowest InferenceLatency for\n an experiment.
" + "smithy.api#documentation": "Returns the job's best AutoMLCandidate
.
Returns information about a model compilation job.
\nTo create a model compilation job, use CreateCompilationJob. To get\n information about multiple model compilation jobs, use ListCompilationJobs.
" + "smithy.api#documentation": "Returns information about a model compilation job.
\nTo create a model compilation job, use CreateCompilationJob. To get\n information about multiple model compilation jobs, use ListCompilationJobs.
" } }, "com.amazonaws.sagemaker#DescribeCompilationJobRequest": { @@ -13235,7 +13133,7 @@ "CompilationStartTime": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { - "smithy.api#documentation": "The time when the model compilation job started the CompilationJob
\n instances.
You are billed for the time between this timestamp and the timestamp in the DescribeCompilationJobResponse$CompilationEndTime field. In Amazon CloudWatch Logs,\n the start time might be later than this time. That's because it takes time to download\n the compilation job, which depends on the size of the compilation job container.
" + "smithy.api#documentation": "The time when the model compilation job started the CompilationJob
\n instances.
You are billed for the time between this timestamp and the timestamp in the DescribeCompilationJobResponse$CompilationEndTime field. In Amazon CloudWatch Logs,\n the start time might be later than this time. That's because it takes time to download\n the compilation job, which depends on the size of the compilation job container.
" } }, "CompilationEndTime": { @@ -14286,7 +14184,7 @@ "ShadowProductionVariants": { "target": "com.amazonaws.sagemaker#ProductionVariantList", "traits": { - "smithy.api#documentation": "An array of ProductionVariant
objects, one for each model that you want\n to host at this endpoint in shadow mode with production traffic replicated from the\n model specified on ProductionVariants
.
\n Array of ProductionVariant
objects. There is one for each model that you want to host at this\n endpoint in shadow mode with production traffic replicated from the model specified on\n ProductionVariants
.If you use this field, you can only specify one variant for\n ProductionVariants
and one variant for ShadowProductionVariants
.\n
An array of ProductionVariantSummary objects, one for each model\n hosted behind this endpoint.
" + "smithy.api#documentation": "An array of ProductionVariantSummary objects, one for each model\n hosted behind this endpoint.
" } }, "DataCaptureConfig": { @@ -14339,7 +14237,7 @@ "EndpointStatus": { "target": "com.amazonaws.sagemaker#EndpointStatus", "traits": { - "smithy.api#documentation": "The status of the endpoint.
\n\n OutOfService
: Endpoint is not available to take incoming\n requests.
\n Creating
: CreateEndpoint is executing.
\n Updating
: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing.
\n SystemUpdating
: Endpoint is undergoing maintenance and cannot be\n updated or deleted or re-scaled until it has completed. This maintenance\n operation does not change any customer-specified values such as VPC config, KMS\n encryption, model, instance type, or instance count.
\n RollingBack
: Endpoint fails to scale up or down or change its\n variant weight and is in the process of rolling back to its previous\n configuration. Once the rollback completes, endpoint returns to an\n InService
status. This transitional status only applies to an\n endpoint that has autoscaling enabled and is undergoing variant weight or\n capacity changes as part of an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities operation is called\n explicitly.
\n InService
: Endpoint is available to process incoming\n requests.
\n Deleting
: DeleteEndpoint is executing.
\n Failed
: Endpoint could not be created, updated, or re-scaled. Use\n DescribeEndpointOutput$FailureReason for information about\n the failure. DeleteEndpoint is the only operation that can be\n performed on a failed endpoint.
The status of the endpoint.
\n\n OutOfService
: Endpoint is not available to take incoming\n requests.
\n Creating
: CreateEndpoint is executing.
\n Updating
: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing.
\n SystemUpdating
: Endpoint is undergoing maintenance and cannot be\n updated or deleted or re-scaled until it has completed. This maintenance\n operation does not change any customer-specified values such as VPC config, KMS\n encryption, model, instance type, or instance count.
\n RollingBack
: Endpoint fails to scale up or down or change its\n variant weight and is in the process of rolling back to its previous\n configuration. Once the rollback completes, endpoint returns to an\n InService
status. This transitional status only applies to an\n endpoint that has autoscaling enabled and is undergoing variant weight or\n capacity changes as part of an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities operation is called\n explicitly.
\n InService
: Endpoint is available to process incoming\n requests.
\n Deleting
: DeleteEndpoint is executing.
\n Failed
: Endpoint could not be created, updated, or re-scaled. Use\n DescribeEndpointOutput$FailureReason for information about\n the failure. DeleteEndpoint is the only operation that can be\n performed on a failed endpoint.
An array of ProductionVariantSummary objects, one for each model\n that you want to host at this endpoint in shadow mode with production traffic\n replicated from the model specified on ProductionVariants
.
\n Array of ProductionVariant
objects. There is one for each model that you want to host at this\n endpoint in shadow mode with production traffic replicated from the model specified on\n ProductionVariants
.If you use this field, you can only specify one variant for\n ProductionVariants
and one variant for ShadowProductionVariants
.\n
The configuration of the offline store. It includes the following configurations:
\nAmazon S3 location of the offline store.
\nConfiguration of the Glue data catalog.
\nTable format of the offline store.
\nOption to disable the automatic creation of a Glue table for the offline store.
\nEncryption configuration.
\nThe configuration of the OfflineStore
, inducing the S3 location of the\n OfflineStore
, Amazon Web Services Glue or Amazon Web Services Hive data catalogue configurations, and the\n security configuration.
The Amazon Resource Name (ARN) of the IAM execution role used to persist data into the OfflineStore if an OfflineStoreConfig is provided.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM execution role used to persist data into the\n OfflineStore
if an OfflineStoreConfig
is provided.
The ARN of the image.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the image.
" } }, "ImageName": { @@ -15462,7 +15360,7 @@ "RoleArn": { "target": "com.amazonaws.sagemaker#RoleArn", "traits": { - "smithy.api#documentation": "The ARN of the IAM role that enables Amazon SageMaker to perform tasks on your behalf.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role that enables Amazon SageMaker to perform tasks on your behalf.
" } } } @@ -15562,12 +15460,6 @@ "traits": { "smithy.api#documentation": "The version of the image. If not specified, the latest version is described.
" } - }, - "Alias": { - "target": "com.amazonaws.sagemaker#SageMakerImageVersionAlias", - "traits": { - "smithy.api#documentation": "The alias of the image version.
" - } } } }, @@ -15601,7 +15493,7 @@ "ImageArn": { "target": "com.amazonaws.sagemaker#ImageArn", "traits": { - "smithy.api#documentation": "The ARN of the image the version is based on.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the image the version is based on.
" } }, "ImageVersionArn": { @@ -15627,49 +15519,6 @@ "traits": { "smithy.api#documentation": "The version number.
" } - }, - "VendorGuidance": { - "target": "com.amazonaws.sagemaker#VendorGuidance", - "traits": { - "smithy.api#documentation": "The stability of the image version specified by the maintainer.
\n\n NOT_PROVIDED
: The maintainers did not provide a status for image version stability.
\n STABLE
: The image version is stable.
\n TO_BE_ARCHIVED
: The image version is set to be archived. Custom image versions that are set to be archived are automatically archived after three months.
\n ARCHIVED
: The image version is archived. Archived image versions are not searchable and are no longer actively supported.
Indicates SageMaker job type compatibility.
\n\n TRAINING
: The image version is compatible with SageMaker training jobs.
\n INFERENCE
: The image version is compatible with SageMaker inference jobs.
\n NOTEBOOK_KERNEL
: The image version is compatible with SageMaker notebook kernels.
The machine learning framework vended in the image version.
" - } - }, - "ProgrammingLang": { - "target": "com.amazonaws.sagemaker#ProgrammingLang", - "traits": { - "smithy.api#documentation": "The supported programming language and its version.
" - } - }, - "Processor": { - "target": "com.amazonaws.sagemaker#Processor", - "traits": { - "smithy.api#documentation": "Indicates CPU or GPU compatibility.
\n\n CPU
: The image version is compatible with CPU.
\n GPU
: The image version is compatible with GPU.
Indicates Horovod compatibility.
" - } - }, - "ReleaseNotes": { - "target": "com.amazonaws.sagemaker#ReleaseNotes", - "traits": { - "smithy.api#documentation": "The maintainer description of the image version.
" - } } } }, @@ -15735,14 +15584,14 @@ "Status": { "target": "com.amazonaws.sagemaker#InferenceExperimentStatus", "traits": { - "smithy.api#documentation": "\n The status of the inference experiment. The following are the possible statuses for an inference\n experiment:\n
\n\n Creating
- Amazon SageMaker is creating your experiment.\n
\n Created
- Amazon SageMaker has finished the creation of your experiment and will begin the\n experiment at the scheduled time.\n
\n Updating
- When you make changes to your experiment, your experiment shows as updating.\n
\n Starting
- Amazon SageMaker is beginning your experiment.\n
\n Running
- Your experiment is in progress.\n
\n Stopping
- Amazon SageMaker is stopping your experiment.\n
\n Completed
- Your experiment has completed.\n
\n Cancelled
- When you conclude your experiment early using the StopInferenceExperiment API, or if any operation fails with an unexpected error, it shows\n as cancelled.\n
\n The status of the inference experiment. The following are the possible statuses for an inference\n experiment:\n
\n\n Creating
- Amazon SageMaker is creating your experiment.\n
\n Created
- Amazon SageMaker has finished creating your experiment and it will begin at the scheduled time.\n
\n Updating
- When you make changes to your experiment, your experiment shows as updating.\n
\n Starting
- Amazon SageMaker is beginning your experiment.\n
\n Running
- Your experiment is in progress.\n
\n Stopping
- Amazon SageMaker is stopping your experiment.\n
\n Completed
- Your experiment has completed.\n
\n Cancelled
- When you conclude your experiment early, it shows as canceled.\n
\n The error message or client-specified Reason
from the StopInferenceExperiment\n API, that explains the status of the inference experiment.\n
The error message for the inference experiment status result.
" } }, "Description": { @@ -15760,7 +15609,7 @@ "CompletionTime": { "target": "com.amazonaws.sagemaker#Timestamp", "traits": { - "smithy.api#documentation": "\n The timestamp at which the inference experiment was completed.\n
" + "smithy.api#documentation": "\n The timestamp at which the inference experiment was completed or will complete.\n
" } }, "LastModifiedTime": { @@ -15772,7 +15621,7 @@ "RoleArn": { "target": "com.amazonaws.sagemaker#RoleArn", "traits": { - "smithy.api#documentation": "\n The ARN of the IAM role that Amazon SageMaker can assume to access model artifacts and container images, and manage\n Amazon SageMaker Inference endpoints for model deployment.\n
" + "smithy.api#documentation": "\n The ARN of the IAM role that Amazon SageMaker can assume to access model artifacts and container images.\n
" } }, "EndpointMetadata": { @@ -15785,20 +15634,20 @@ "ModelVariants": { "target": "com.amazonaws.sagemaker#ModelVariantConfigSummaryList", "traits": { - "smithy.api#documentation": "\n An array of ModelVariantConfigSummary
objects. There is one for each variant in the inference\n experiment. Each ModelVariantConfigSummary
object in the array describes the infrastructure\n configuration for deploying the corresponding variant.\n
\n Array of ModelVariantConfigSummary
objects. There is one for each variant in the inference\n experiment. Each ModelVariantConfigSummary
object in the array describes the infrastructure\n configuration for deploying the corresponding variant.\n
The Amazon S3 location and configuration for storing inference request and response data.
" + "smithy.api#documentation": "The Amazon S3 storage configuration for the inference experiment.
" } }, "ShadowModeConfig": { "target": "com.amazonaws.sagemaker#ShadowModeConfig", "traits": { - "smithy.api#documentation": "\n The configuration of ShadowMode
inference experiment type, which shows the production variant\n that takes all the inference requests, and the shadow variant to which Amazon SageMaker replicates a percentage of the\n inference requests. For the shadow variant it also shows the percentage of requests that Amazon SageMaker replicates.\n
\n Shows which variant is a production variant and which variant is a shadow variant. For shadow\n variants, also shows the sampling percentage.\n
" } }, "KmsKey": { @@ -16052,7 +15901,7 @@ "LabelCategoryConfigS3Uri": { "target": "com.amazonaws.sagemaker#S3Uri", "traits": { - "smithy.api#documentation": "The S3 location of the JSON file that defines the categories used to label data\n objects. Please note the following label-category limits:
\nSemantic segmentation labeling jobs using automated labeling: 20 labels
\nBox bounding labeling jobs (all): 10 labels
\nThe file is a JSON structure in the following format:
\n\n {
\n
\n \"document-version\": \"2018-11-28\"
\n
\n \"labels\": [
\n
\n {
\n
\n \"label\": \"label 1\"
\n
\n },
\n
\n {
\n
\n \"label\": \"label 2\"
\n
\n },
\n
\n ...
\n
\n {
\n
\n \"label\": \"label n\"
\n
\n }
\n
\n ]
\n
\n }
\n
The S3 location of the JSON file that defines the categories used to label data\n objects. Please note the following label-category limits:
\nSemantic segmentation labeling jobs using automated labeling: 20 labels
\nBox bounding labeling jobs (all): 10 labels
\nThe file is a JSON structure in the following format:
\n\n {
\n
\n \"document-version\": \"2018-11-28\"
\n
\n \"labels\": [
\n
\n {
\n
\n \"label\": \"label 1\"
\n
\n },
\n
\n {
\n
\n \"label\": \"label 2\"
\n
\n },
\n
\n ...
\n
\n {
\n
\n \"label\": \"label n\"
\n
\n }
\n
\n ]
\n
\n }
\n
Returns a description of the specified model package, which is used to create SageMaker\n models or list them on Amazon Web Services Marketplace.
\nTo create models in SageMaker, buyers can subscribe to model packages listed on Amazon Web Services\n Marketplace.
" + "smithy.api#documentation": "Returns a description of the specified model package, which is used to create SageMaker\n models or list them on Amazon Web Services Marketplace.
\nTo create models in SageMaker, buyers can subscribe to model packages listed on Amazon Web Services\n Marketplace.
" } }, "com.amazonaws.sagemaker#DescribeModelPackageGroup": { @@ -16759,7 +16608,7 @@ "ModelPackageName": { "target": "com.amazonaws.sagemaker#VersionedArnOrName", "traits": { - "smithy.api#documentation": "The name or Amazon Resource Name (ARN) of the model package to describe.
\nWhen you specify a name, the name must have 1 to 63 characters. Valid\n characters are a-z, A-Z, 0-9, and - (hyphen).
", + "smithy.api#documentation": "The name or Amazon Resource Name (ARN) of the model package to describe.
\nWhen you specify a name, the name must have 1 to 63 characters. Valid\n characters are a-z, A-Z, 0-9, and - (hyphen).
", "smithy.api#required": {} } } @@ -17229,7 +17078,7 @@ "target": "com.amazonaws.sagemaker#DescribeNotebookInstanceLifecycleConfigOutput" }, "traits": { - "smithy.api#documentation": "Returns a description of a notebook instance lifecycle configuration.
\nFor information about notebook instance lifecycle configurations, see Step\n 2.1: (Optional) Customize a Notebook Instance.
" + "smithy.api#documentation": "Returns a description of a notebook instance lifecycle configuration.
\nFor information about notebook instance lifecycle configurations, see Step\n 2.1: (Optional) Customize a Notebook Instance.
" } }, "com.amazonaws.sagemaker#DescribeNotebookInstanceLifecycleConfigInput": { @@ -17369,13 +17218,13 @@ "NotebookInstanceLifecycleConfigName": { "target": "com.amazonaws.sagemaker#NotebookInstanceLifecycleConfigName", "traits": { - "smithy.api#documentation": "Returns the name of a notebook instance lifecycle configuration.
\nFor information about notebook instance lifecycle configurations, see Step\n 2.1: (Optional) Customize a Notebook Instance\n
" + "smithy.api#documentation": "Returns the name of a notebook instance lifecycle configuration.
\nFor information about notebook instance lifecycle configurations, see Step\n 2.1: (Optional) Customize a Notebook Instance\n
" } }, "DirectInternetAccess": { "target": "com.amazonaws.sagemaker#DirectInternetAccess", "traits": { - "smithy.api#documentation": "Describes whether SageMaker provides internet access to the notebook instance. If this\n value is set to Disabled, the notebook instance does not have\n internet access, and cannot connect to SageMaker training and endpoint services.
\nFor more information, see Notebook Instances Are Internet-Enabled by Default.
" + "smithy.api#documentation": "Describes whether SageMaker provides internet access to the notebook instance. If this\n value is set to Disabled, the notebook instance does not have\n internet access, and cannot connect to SageMaker training and endpoint services.
\nFor more information, see Notebook Instances Are Internet-Enabled by Default.
" } }, "VolumeSizeInGB": { @@ -17405,7 +17254,7 @@ "RootAccess": { "target": "com.amazonaws.sagemaker#RootAccess", "traits": { - "smithy.api#documentation": "Whether root access is enabled or disabled for users of the notebook instance.
\nLifecycle configurations need root access to be able to set up a notebook\n instance. Because of this, lifecycle configurations associated with a notebook\n instance always run with root access even if you disable root access for\n users.
\nWhether root access is enabled or disabled for users of the notebook instance.
\nLifecycle configurations need root access to be able to set up a notebook\n instance. Because of this, lifecycle configurations associated with a notebook\n instance always run with root access even if you disable root access for\n users.
\nReturns information about a training job.
\nSome of the attributes below only appear if the training job successfully starts.\n If the training job fails, TrainingJobStatus
is Failed
and,\n depending on the FailureReason
, attributes like\n TrainingStartTime
, TrainingTimeInSeconds
,\n TrainingEndTime
, and BillableTimeInSeconds
may not be\n present in the response.
Returns information about a training job.
\nSome of the attributes below only appear if the training job successfully starts.\n If the training job fails, TrainingJobStatus
is Failed
and,\n depending on the FailureReason
, attributes like\n TrainingStartTime
, TrainingTimeInSeconds
,\n TrainingEndTime
, and BillableTimeInSeconds
may not be\n present in the response.
The status of the training job.
\nSageMaker provides the following training job statuses:
\n\n InProgress
- The training is in progress.
\n Completed
- The training job has completed.
\n Failed
- The training job has failed. To see the reason for the\n failure, see the FailureReason
field in the response to a\n DescribeTrainingJobResponse
call.
\n Stopping
- The training job is stopping.
\n Stopped
- The training job has stopped.
For more detailed information, see SecondaryStatus
.
The status of the training job.
\nSageMaker provides the following training job statuses:
\n\n InProgress
- The training is in progress.
\n Completed
- The training job has completed.
\n Failed
- The training job has failed. To see the reason for the\n failure, see the FailureReason
field in the response to a\n DescribeTrainingJobResponse
call.
\n Stopping
- The training job is stopping.
\n Stopped
- The training job has stopped.
For more detailed information, see SecondaryStatus
.
Provides detailed information about the state of the training job. For detailed\n information on the secondary status of the training job, see StatusMessage
\n under SecondaryStatusTransition.
SageMaker provides primary statuses and secondary statuses that apply to each of\n them:
\n\n Starting
\n - Starting the training job.
\n Downloading
- An optional stage for algorithms that\n support File
training input mode. It indicates that\n data is being downloaded to the ML storage volumes.
\n Training
- Training is in progress.
\n Interrupted
- The job stopped because the managed\n spot training instances were interrupted.
\n Uploading
- Training is complete and the model\n artifacts are being uploaded to the S3 location.
\n Completed
- The training job has completed.
\n Failed
- The training job has failed. The reason for\n the failure is returned in the FailureReason
field of\n DescribeTrainingJobResponse
.
\n MaxRuntimeExceeded
- The job stopped because it\n exceeded the maximum allowed runtime.
\n MaxWaitTimeExceeded
- The job stopped because it\n exceeded the maximum allowed wait time.
\n Stopped
- The training job has stopped.
\n Stopping
- Stopping the training job.
Valid values for SecondaryStatus
are subject to change.
We no longer support the following secondary statuses:
\n\n LaunchingMLInstances
\n
\n PreparingTraining
\n
\n DownloadingTrainingImage
\n
Provides detailed information about the state of the training job. For detailed\n information on the secondary status of the training job, see StatusMessage
\n under SecondaryStatusTransition.
SageMaker provides primary statuses and secondary statuses that apply to each of\n them:
\n\n Starting
\n - Starting the training job.
\n Downloading
- An optional stage for algorithms that\n support File
training input mode. It indicates that\n data is being downloaded to the ML storage volumes.
\n Training
- Training is in progress.
\n Interrupted
- The job stopped because the managed\n spot training instances were interrupted.
\n Uploading
- Training is complete and the model\n artifacts are being uploaded to the S3 location.
\n Completed
- The training job has completed.
\n Failed
- The training job has failed. The reason for\n the failure is returned in the FailureReason
field of\n DescribeTrainingJobResponse
.
\n MaxRuntimeExceeded
- The job stopped because it\n exceeded the maximum allowed runtime.
\n MaxWaitTimeExceeded
- The job stopped because it\n exceeded the maximum allowed wait time.
\n Stopped
- The training job has stopped.
\n Stopping
- Stopping the training job.
Valid values for SecondaryStatus
are subject to change.
We no longer support the following secondary statuses:
\n\n LaunchingMLInstances
\n
\n PreparingTraining
\n
\n DownloadingTrainingImage
\n
Specifies a limit to how long a model training job can run. It also specifies how long\n a managed Spot training job has to complete. When the job reaches the time limit, SageMaker\n ends the training job. Use this API to cap model training costs.
\nTo stop a job, SageMaker sends the algorithm the SIGTERM
signal, which delays\n job termination for 120 seconds. Algorithms can use this 120-second window to save the\n model artifacts, so the results of training are not lost.
Specifies a limit to how long a model training job can run. It also specifies how long\n a managed Spot training job has to complete. When the job reaches the time limit, SageMaker\n ends the training job. Use this API to cap model training costs.
\nTo stop a job, SageMaker sends the algorithm the SIGTERM
signal, which delays\n job termination for 120 seconds. Algorithms can use this 120-second window to save the\n model artifacts, so the results of training are not lost.
The billable time in seconds. Billable time refers to the absolute wall-clock\n time.
\nMultiply BillableTimeInSeconds
by the number of instances\n (InstanceCount
) in your training cluster to get the total compute time\n SageMaker bills you if you run distributed training. The formula is as follows:\n BillableTimeInSeconds * InstanceCount
.
You can calculate the savings from using managed spot training using the formula\n (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100
. For example,\n if BillableTimeInSeconds
is 100 and TrainingTimeInSeconds
is\n 500, the savings is 80%.
The billable time in seconds. Billable time refers to the absolute wall-clock\n time.
\nMultiply BillableTimeInSeconds
by the number of instances\n (InstanceCount
) in your training cluster to get the total compute time\n SageMaker bills you if you run distributed training. The formula is as follows:\n BillableTimeInSeconds * InstanceCount
.
You can calculate the savings from using managed spot training using the formula\n (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100
. For example,\n if BillableTimeInSeconds
is 100 and TrainingTimeInSeconds
is\n 500, the savings is 80%.
Specifies the number of records to include in a mini-batch for an HTTP inference\n request.\n A record\n is a single unit of input data that inference\n can be made on. For example, a single line in a CSV file is a record.
\nTo enable the batch strategy, you must set SplitType
\n to\n Line
, RecordIO
, or\n TFRecord
.
Specifies the number of records to include in a mini-batch for an HTTP inference\n request.\n A record\n is a single unit of input data that inference\n can be made on. For example, a single line in a CSV file is a record.
\nTo enable the batch strategy, you must set SplitType
\n to\n Line
, RecordIO
, or\n TFRecord
.
A list of ARNs and, if applicable, job types for multiple sources of an experiment\n run.
" + "smithy.api#documentation": "A list of the Amazon Resource Name (ARN) and, if applicable, job type for multiple sources of an experiment run.
" } } } @@ -19055,7 +18904,7 @@ "target": "com.amazonaws.sagemaker#DescribeWorkforceResponse" }, "traits": { - "smithy.api#documentation": "Lists private workforce information, including workforce name, Amazon Resource Name\n (ARN), and, if applicable, allowed IP address ranges (CIDRs). Allowable IP address\n ranges are the IP addresses that workers can use to access tasks.
\nThis operation applies only to private workforces.
\nLists private workforce information, including workforce name, Amazon Resource Name\n (ARN), and, if applicable, allowed IP address ranges (CIDRs). Allowable IP address\n ranges are the IP addresses that workers can use to access tasks.
\nThis operation applies only to private workforces.
\nThe configuration for attaching a SageMaker user profile name to the execution role as a\n sts:SourceIdentity key. This configuration can only be modified if there\n are no apps in the InService
or Pending
state.
The security groups for the Amazon Virtual Private Cloud that the Domain
uses for\n communication between Domain-level apps and user apps.
A list of the shadow variants hosted on the endpoint. Each shadow variant is a model\n in shadow mode with production traffic replicated from the proudction variant.
" + "smithy.api#documentation": "\n Array of ProductionVariant
objects, one for each model that you want to host at this endpoint\n in shadow mode with production traffic replicated from the model specified on\n ProductionVariants
.If you use this field, you can only specify one variant for\n ProductionVariants
and one variant for ShadowProductionVariants
.\n
\n If the status of the endpoint is Failed
, or the status is InService
but update\n operation fails, this provides the reason why it failed.\n
\n If the status of the endpoint is Failed
, this provides the reason why it failed.\n
The status of the endpoint.
\n\n OutOfService
: Endpoint is not available to take incoming\n requests.
\n Creating
: CreateEndpoint is executing.
\n Updating
: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing.
\n SystemUpdating
: Endpoint is undergoing maintenance and cannot be\n updated or deleted or re-scaled until it has completed. This maintenance\n operation does not change any customer-specified values such as VPC config, KMS\n encryption, model, instance type, or instance count.
\n RollingBack
: Endpoint fails to scale up or down or change its\n variant weight and is in the process of rolling back to its previous\n configuration. Once the rollback completes, endpoint returns to an\n InService
status. This transitional status only applies to an\n endpoint that has autoscaling enabled and is undergoing variant weight or\n capacity changes as part of an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities operation is called\n explicitly.
\n InService
: Endpoint is available to process incoming\n requests.
\n Deleting
: DeleteEndpoint is executing.
\n Failed
: Endpoint could not be created, updated, or re-scaled. Use\n DescribeEndpointOutput$FailureReason for information about\n the failure. DeleteEndpoint is the only operation that can be\n performed on a failed endpoint.
To get a list of endpoints with a specified status, use the ListEndpointsInput$StatusEquals filter.
", + "smithy.api#documentation": "The status of the endpoint.
\n\n OutOfService
: Endpoint is not available to take incoming\n requests.
\n Creating
: CreateEndpoint is executing.
\n Updating
: UpdateEndpoint or UpdateEndpointWeightsAndCapacities is executing.
\n SystemUpdating
: Endpoint is undergoing maintenance and cannot be\n updated or deleted or re-scaled until it has completed. This maintenance\n operation does not change any customer-specified values such as VPC config, KMS\n encryption, model, instance type, or instance count.
\n RollingBack
: Endpoint fails to scale up or down or change its\n variant weight and is in the process of rolling back to its previous\n configuration. Once the rollback completes, endpoint returns to an\n InService
status. This transitional status only applies to an\n endpoint that has autoscaling enabled and is undergoing variant weight or\n capacity changes as part of an UpdateEndpointWeightsAndCapacities call or when the UpdateEndpointWeightsAndCapacities operation is called\n explicitly.
\n InService
: Endpoint is available to process incoming\n requests.
\n Deleting
: DeleteEndpoint is executing.
\n Failed
: Endpoint could not be created, updated, or re-scaled. Use\n DescribeEndpointOutput$FailureReason for information about\n the failure. DeleteEndpoint is the only operation that can be\n performed on a failed endpoint.
To get a list of endpoints with a specified status, use the ListEndpointsInput$StatusEquals filter.
", "smithy.api#required": {} } } @@ -21581,7 +21424,7 @@ "ExperimentName": { "target": "com.amazonaws.sagemaker#ExperimentEntityName", "traits": { - "smithy.api#documentation": "The name of an existing experiment to associate with the trial component.
" + "smithy.api#documentation": "The name of an existing experiment to associate the trial component with.
" } }, "TrialName": { @@ -21599,7 +21442,7 @@ "RunName": { "target": "com.amazonaws.sagemaker#ExperimentEntityName", "traits": { - "smithy.api#documentation": "The name of the experiment run to associate with the trial component.
" + "smithy.api#documentation": "The name of the experiment run to associate the trial component with.
" } } }, @@ -21872,7 +21715,7 @@ "RecordIdentifierFeatureName": { "target": "com.amazonaws.sagemaker#FeatureName", "traits": { - "smithy.api#documentation": "The name of the Feature
whose value uniquely identifies a\n Record
defined in the FeatureGroup
\n FeatureDefinitions
.
The name of the Feature
whose value uniquely identifies a\n Record
defined in the FeatureGroup
\n FeatureDefinitions
.
A conditional statement for a search expression that includes a resource property, a\n Boolean operator, and a value. Resources that match the statement are returned in the\n results from the Search API.
\nIf you specify a Value
, but not an Operator
, Amazon SageMaker uses the\n equals operator.
In search, there are several property types:
\nTo define a metric filter, enter a value using the form\n \"Metrics.
, where
is\n a metric name. For example, the following filter searches for training jobs\n with an \"accuracy\"
metric greater than\n \"0.9\"
:
\n {
\n
\n \"Name\": \"Metrics.accuracy\",
\n
\n \"Operator\": \"GreaterThan\",
\n
\n \"Value\": \"0.9\"
\n
\n }
\n
To define a hyperparameter filter, enter a value with the form\n \"HyperParameters.
. Decimal hyperparameter\n values are treated as a decimal in a comparison if the specified\n Value
is also a decimal value. If the specified\n Value
is an integer, the decimal hyperparameter values are\n treated as integers. For example, the following filter is satisfied by\n training jobs with a \"learning_rate\"
hyperparameter that is\n less than \"0.5\"
:
\n {
\n
\n \"Name\": \"HyperParameters.learning_rate\",
\n
\n \"Operator\": \"LessThan\",
\n
\n \"Value\": \"0.5\"
\n
\n }
\n
To define a tag filter, enter a value with the form\n Tags.
.
A conditional statement for a search expression that includes a resource property, a\n Boolean operator, and a value. Resources that match the statement are returned in the\n results from the Search API.
\n \nIf you specify a Value
, but not an Operator
, Amazon SageMaker uses the\n equals operator.
In search, there are several property types:
\nTo define a metric filter, enter a value using the form\n \"Metrics.
, where
is\n a metric name. For example, the following filter searches for training jobs\n with an \"accuracy\"
metric greater than\n \"0.9\"
:
\n {
\n
\n \"Name\": \"Metrics.accuracy\",
\n
\n \"Operator\": \"GreaterThan\",
\n
\n \"Value\": \"0.9\"
\n
\n }
\n
To define a hyperparameter filter, enter a value with the form\n \"HyperParameters.
. Decimal hyperparameter\n values are treated as a decimal in a comparison if the specified\n Value
is also a decimal value. If the specified\n Value
is an integer, the decimal hyperparameter values are\n treated as integers. For example, the following filter is satisfied by\n training jobs with a \"learning_rate\"
hyperparameter that is\n less than \"0.5\"
:
\n {
\n
\n \"Name\": \"HyperParameters.learning_rate\",
\n
\n \"Operator\": \"LessThan\",
\n
\n \"Value\": \"0.5\"
\n
\n }
\n
To define a tag filter, enter a value with the form\n Tags.
.
The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret that\n contains the credentials used to access the git repository. The secret must have a\n staging label of AWSCURRENT
and must be in the following format:
\n {\"username\": UserName, \"password\":\n Password}
\n
The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret that\n contains the credentials used to access the git repository. The secret must have a\n staging label of AWSCURRENT
and must be in the following format:
\n {\"username\": UserName, \"password\":\n Password}
\n
The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret that\n contains the credentials used to access the git repository. The secret must have a\n staging label of AWSCURRENT
and must be in the following format:
\n {\"username\": UserName, \"password\":\n Password}
\n
The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret that\n contains the credentials used to access the git repository. The secret must have a\n staging label of AWSCURRENT
and must be in the following format:
\n {\"username\": UserName, \"password\":\n Password}
\n
The Amazon Resource Name (ARN) of a Lambda function that is run before a data object\n is sent to a human worker. Use this function to provide input to a custom labeling\n job.
\nFor built-in\n task types, use one of the following Amazon SageMaker Ground Truth Lambda function ARNs for\n PreHumanTaskLambdaArn
. For custom labeling workflows, see Pre-annotation Lambda.
\n Bounding box - Finds the most similar boxes from\n different workers based on the Jaccard index of the boxes.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-BoundingBox
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-BoundingBox
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-BoundingBox
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-BoundingBox
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-BoundingBox
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-BoundingBox
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-BoundingBox
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-BoundingBox
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-BoundingBox
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-BoundingBox
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-BoundingBox
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-BoundingBox
\n
\n Image classification - Uses a variant of the Expectation\n Maximization approach to estimate the true class of an image based on\n annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClass
\n
\n Multi-label image classification - Uses a variant of the Expectation\n Maximization approach to estimate the true classes of an image based on\n annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClassMultiLabel
\n
\n Semantic segmentation - Treats each pixel in an image as\n a multi-class classification and treats pixel annotations from workers as\n \"votes\" for the correct label.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-SemanticSegmentation
\n
\n Text classification - Uses a variant of the Expectation\n Maximization approach to estimate the true class of text based on annotations\n from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClass
\n
\n Multi-label text classification - Uses a variant of the\n Expectation Maximization approach to estimate the true classes of text based on\n annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClassMultiLabel
\n
\n Named entity recognition - Groups similar selections and\n calculates aggregate boundaries, resolving to most-assigned label.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-NamedEntityRecognition
\n
\n Video Classification - Use this task type when you need workers to classify videos using\n predefined labels that you specify. Workers are shown videos and are asked to choose one\n label for each video.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoMultiClass
\n
\n Video Frame Object Detection - Use this task type to\n have workers identify and locate objects in a sequence of video frames (images extracted\n from a video) using bounding boxes. For example, you can use this task to ask workers to\n identify and localize various objects in a series of video frames, such as cars, bikes,\n and pedestrians.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoObjectDetection
\n
\n Video Frame Object Tracking - Use this task type to\n have workers track the movement of objects in a sequence of video frames (images\n extracted from a video) using bounding boxes. For example, you can use this task to ask\n workers to track the movement of objects, such as cars, bikes, and pedestrians.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoObjectTracking
\n
\n 3D Point Cloud Modalities\n
\nUse the following pre-annotation lambdas for 3D point cloud labeling modality tasks.\n See 3D Point Cloud Task types\n to learn more.
\n\n 3D Point Cloud Object Detection - \n Use this task type when you want workers to classify objects in a 3D point cloud by \n drawing 3D cuboids around objects. For example, you can use this task type to ask workers \n to identify different types of objects in a point cloud, such as cars, bikes, and pedestrians.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudObjectDetection
\n
\n 3D Point Cloud Object Tracking - \n Use this task type when you want workers to draw 3D cuboids around objects\n that appear in a sequence of 3D point cloud frames. \n For example, you can use this task type to ask workers to track \n the movement of vehicles across multiple point cloud frames.\n
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudObjectTracking
\n
\n 3D Point Cloud Semantic Segmentation - \n Use this task type when you want workers to create a point-level semantic segmentation masks by \n painting objects in a 3D point cloud using different colors where each color is assigned to one of \n the classes you specify.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudSemanticSegmentation
\n
\n Use the following ARNs for Label Verification and Adjustment Jobs\n
\nUse label verification and adjustment jobs to review and adjust labels. To learn more,\n see Verify and Adjust Labels .
\n\n Bounding box verification - Uses a variant of the\n Expectation Maximization approach to estimate the true class of verification\n judgement for bounding box labels based on annotations from individual\n workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-VerificationBoundingBox
\n
\n Bounding box adjustment - Finds the most similar boxes\n from different workers based on the Jaccard index of the adjusted\n annotations.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentBoundingBox
\n
\n Semantic segmentation verification - Uses a variant of\n the Expectation Maximization approach to estimate the true class of verification\n judgment for semantic segmentation labels based on annotations from individual\n workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VerificationSemanticSegmentation
\n
\n Semantic segmentation adjustment - Treats each pixel in\n an image as a multi-class classification and treats pixel adjusted annotations\n from workers as \"votes\" for the correct label.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentSemanticSegmentation
\n
\n Video Frame Object Detection Adjustment - \n Use this task type when you want workers to adjust bounding boxes that workers have added \n to video frames to classify and localize objects in a sequence of video frames.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentVideoObjectDetection
\n
\n Video Frame Object Tracking Adjustment - \n Use this task type when you want workers to adjust bounding boxes that workers have added \n to video frames to track object movement across a sequence of video frames.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentVideoObjectTracking
\n
\n 3D point cloud object detection adjustment - Adjust\n 3D cuboids in a point cloud frame.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n 3D point cloud object tracking adjustment - Adjust 3D\n cuboids across a sequence of point cloud frames.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n 3D point cloud semantic segmentation adjustment -\n Adjust semantic segmentation masks in a 3D point cloud.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
The Amazon Resource Name (ARN) of a Lambda function that is run before a data object\n is sent to a human worker. Use this function to provide input to a custom labeling\n job.
\nFor built-in\n task types, use one of the following Amazon SageMaker Ground Truth Lambda function ARNs for\n PreHumanTaskLambdaArn
. For custom labeling workflows, see Pre-annotation Lambda.
\n Bounding box - Finds the most similar boxes from\n different workers based on the Jaccard index of the boxes.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-BoundingBox
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-BoundingBox
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-BoundingBox
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-BoundingBox
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-BoundingBox
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-BoundingBox
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-BoundingBox
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-BoundingBox
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-BoundingBox
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-BoundingBox
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-BoundingBox
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-BoundingBox
\n
\n Image classification - Uses a variant of the Expectation\n Maximization approach to estimate the true class of an image based on\n annotations from individual workers.
\n\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClass
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClass
\n
\n Multi-label image classification - Uses a variant of the Expectation\n Maximization approach to estimate the true classes of an image based on\n annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-ImageMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-ImageMultiClassMultiLabel
\n
\n Semantic segmentation - Treats each pixel in an image as\n a multi-class classification and treats pixel annotations from workers as\n \"votes\" for the correct label.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-SemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-SemanticSegmentation
\n
\n Text classification - Uses a variant of the Expectation\n Maximization approach to estimate the true class of text based on annotations\n from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClass
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClass
\n
\n Multi-label text classification - Uses a variant of the\n Expectation Maximization approach to estimate the true classes of text based on\n annotations from individual workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-TextMultiClassMultiLabel
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-TextMultiClassMultiLabel
\n
\n Named entity recognition - Groups similar selections and\n calculates aggregate boundaries, resolving to most-assigned label.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-NamedEntityRecognition
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-NamedEntityRecognition
\n
\n Video Classification - Use this task type when you need workers to classify videos using\n predefined labels that you specify. Workers are shown videos and are asked to choose one\n label for each video.
\n \n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoMultiClass
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoMultiClass
\n
\n Video Frame Object Detection - Use this task type to\n have workers identify and locate objects in a sequence of video frames (images extracted\n from a video) using bounding boxes. For example, you can use this task to ask workers to\n identify and localize various objects in a series of video frames, such as cars, bikes,\n and pedestrians.
\n \n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoObjectDetection
\n
\n Video Frame Object Tracking - Use this task type to\n have workers track the movement of objects in a sequence of video frames (images\n extracted from a video) using bounding boxes. For example, you can use this task to ask\n workers to track the movement of objects, such as cars, bikes, and pedestrians.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VideoObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-VideoObjectTracking
\n
\n 3D Point Cloud Modalities\n
\nUse the following pre-annotation lambdas for 3D point cloud labeling modality tasks.\n See 3D Point Cloud Task types\n to learn more.
\n\n \n\n 3D Point Cloud Object Detection - \n Use this task type when you want workers to classify objects in a 3D point cloud by \n drawing 3D cuboids around objects. For example, you can use this task type to ask workers \n to identify different types of objects in a point cloud, such as cars, bikes, and pedestrians.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudObjectDetection
\n
\n 3D Point Cloud Object Tracking - \n Use this task type when you want workers to draw 3D cuboids around objects\n that appear in a sequence of 3D point cloud frames. \n For example, you can use this task type to ask workers to track \n the movement of vehicles across multiple point cloud frames.\n
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudObjectTracking
\n
\n 3D Point Cloud Semantic Segmentation - \n Use this task type when you want workers to create a point-level semantic segmentation masks by \n painting objects in a 3D point cloud using different colors where each color is assigned to one of \n the classes you specify.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-3DPointCloudSemanticSegmentation
\n
\n Use the following ARNs for Label Verification and Adjustment Jobs\n
\nUse label verification and adjustment jobs to review and adjust labels. To learn more,\n see Verify and Adjust Labels .
\n \n\n Bounding box verification - Uses a variant of the\n Expectation Maximization approach to estimate the true class of verification\n judgement for bounding box labels based on annotations from individual\n workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VerificationBoundingBox
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-VerificationBoundingBox
\n
\n Bounding box adjustment - Finds the most similar boxes\n from different workers based on the Jaccard index of the adjusted\n annotations.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentBoundingBox
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentBoundingBox
\n
\n Semantic segmentation verification - Uses a variant of\n the Expectation Maximization approach to estimate the true class of verification\n judgment for semantic segmentation labels based on annotations from individual\n workers.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-VerificationSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-VerificationSemanticSegmentation
\n
\n Semantic segmentation adjustment - Treats each pixel in\n an image as a multi-class classification and treats pixel adjusted annotations\n from workers as \"votes\" for the correct label.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentSemanticSegmentation
\n
\n Video Frame Object Detection Adjustment - \n Use this task type when you want workers to adjust bounding boxes that workers have added \n to video frames to classify and localize objects in a sequence of video frames.
\n \n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentVideoObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentVideoObjectDetection
\n
\n Video Frame Object Tracking Adjustment - \n Use this task type when you want workers to adjust bounding boxes that workers have added \n to video frames to track object movement across a sequence of video frames.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-AdjustmentVideoObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-AdjustmentVideoObjectTracking
\n
\n 3D point cloud object detection adjustment - Adjust\n 3D cuboids in a point cloud frame.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectDetection
\n
\n 3D point cloud object tracking adjustment - Adjust 3D\n cuboids across a sequence of point cloud frames.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudObjectTracking
\n
\n 3D point cloud semantic segmentation adjustment -\n Adjust semantic segmentation masks in a 3D point cloud.
\n\n arn:aws:lambda:us-east-1:432418664414:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-east-2:266458841044:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:us-west-2:081040173940:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-1:568282634449:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-1:477331159723:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-2:454466003867:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-south-1:565803892007:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-central-1:203001061592:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-northeast-2:845288260483:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:eu-west-2:487402164563:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ap-southeast-1:377565633583:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
\n arn:aws:lambda:ca-central-1:918755190332:function:PRE-Adjustment3DPointCloudSemanticSegmentation
\n
The amount of time that a worker has to complete a task.
\nIf you create a custom labeling job, the maximum value for this parameter is 8 hours\n (28,800 seconds).
\nIf you create a labeling job using a built-in task type the maximum\n for this parameter depends on the task type you use:
\nFor image and \n text labeling jobs,\n the maximum is 8 hours (28,800 seconds).
\nFor 3D point cloud and video frame labeling jobs, the maximum is 30 days (2952,000 seconds) for non-AL mode. For most users, the maximum is also 30 days.
\nThe amount of time that a worker has to complete a task.
\nIf you create a custom labeling job, the maximum value for this parameter is 8 hours\n (28,800 seconds).
\nIf you create a labeling job using a built-in task type the maximum\n for this parameter depends on the task type you use:
\nFor image and \n text labeling jobs,\n the maximum is 8 hours (28,800 seconds).
\nFor 3D point cloud and video frame labeling jobs, the maximum is 30 days (2952,000 seconds) for non-AL mode. For most users, the maximum is also 30 days.
\nThe length of time that a task remains available for labeling by human workers. The\n default and maximum values for this parameter depend on the type of workforce you\n use.
\nIf you choose the Amazon Mechanical Turk workforce, the maximum is 12 hours (43,200 seconds).\n The default is 6 hours (21,600 seconds).
\nIf you choose a private or vendor workforce, the default value is 30 days (2592,000 seconds) for non-AL mode. For most users, the maximum is also 30 days.
\nThe length of time that a task remains available for labeling by human workers. The\n default and maximum values for this parameter depend on the type of workforce you\n use.
\nIf you choose the Amazon Mechanical Turk workforce, the maximum is 12 hours (43,200 seconds).\n The default is 6 hours (21,600 seconds).
\nIf you choose a private or vendor workforce, the default value is 30 days (2592,000 seconds) for non-AL mode. For most users, the maximum is also 30 days.
\nThe resources,\n including\n the compute instances and storage volumes, to use for the training\n jobs that the tuning job launches.
\nStorage volumes store model artifacts and\n incremental\n states. Training algorithms might also use storage volumes for\n scratch\n space. If you want SageMaker to use the storage volume to store the\n training data, choose File
as the TrainingInputMode
in the\n algorithm specification. For distributed training algorithms, specify an instance count\n greater than 1.
If you want to use hyperparameter optimization with instance type flexibility, use\n HyperParameterTuningResourceConfig
instead.
The resources,\n including\n the compute instances and storage volumes, to use for the training\n jobs that the tuning job launches.
\nStorage volumes store model artifacts and\n incremental\n states. Training algorithms might also use storage volumes for\n scratch\n space. If you want SageMaker to use the storage volume to store the\n training data, choose File
as the TrainingInputMode
in the\n algorithm specification. For distributed training algorithms, specify an instance count\n greater than 1.
If you want to use hyperparameter optimization with instance type flexibility, use\n HyperParameterTuningResourceConfig
instead.
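A minimal ResourceConfig sketch for the training jobs a tuning job launches; the instance type, count, and volume size are illustrative, and the separate AlgorithmSpecification is shown only to locate TrainingInputMode (the training image URI is a placeholder):

  {
    "ResourceConfig": {
      "InstanceType": "ml.m5.xlarge",
      "InstanceCount": 2,
      "VolumeSizeInGB": 50
    },
    "AlgorithmSpecification": {
      "TrainingImage": "123456789012.dkr.ecr.us-east-1.amazonaws.com/example:latest",
      "TrainingInputMode": "File"
    }
  }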
The status of the objective metric for the training job:
\nSucceeded: The\n final\n objective metric for the training job was evaluated by the\n hyperparameter tuning job and\n used\n in the hyperparameter tuning process.
\nPending: The training job is in progress and evaluation of its final objective\n metric is pending.
\nFailed:\n The final objective metric for the training job was not evaluated, and was not\n used in the hyperparameter tuning process. This typically occurs when the\n training job failed or did not emit an objective\n metric.
\nThe status of the objective metric for the training job:
\nSucceeded: The\n final\n objective metric for the training job was evaluated by the\n hyperparameter tuning job and\n used\n in the hyperparameter tuning process.
\nPending: The training job is in progress and evaluation of its final objective\n metric is pending.
\nFailed:\n The final objective metric for the training job was not evaluated, and was not\n used in the hyperparameter tuning process. This typically occurs when the\n training job failed or did not emit an objective\n metric.
\nThe number of instances of the type specified by InstanceType
. Choose an\n instance count larger than 1 for distributed training algorithms. See SageMaker distributed training\n jobs for more informcration.
The number of instances of the type specified by InstanceType
. Choose an\n instance count larger than 1 for distributed training algorithms. See SageMaker distributed training\n jobs for more information.
The HyperParameterTuningJobObjective specifies the objective metric\n used to evaluate the performance of training jobs launched by this tuning job.
" + "smithy.api#documentation": "The HyperParameterTuningJobObjective object that specifies the\n objective\n metric for this tuning job.
" } }, "ResourceLimits": { "target": "com.amazonaws.sagemaker#ResourceLimits", "traits": { - "smithy.api#documentation": "The ResourceLimits object that specifies the maximum number of\n training and parallel training jobs that can be used for this hyperparameter tuning\n job.
", + "smithy.api#documentation": "The ResourceLimits object that specifies the\n maximum\n number of training jobs and parallel training jobs for this tuning\n job.
", "smithy.api#required": {} } }, "ParameterRanges": { "target": "com.amazonaws.sagemaker#ParameterRanges", "traits": { - "smithy.api#documentation": "The ParameterRanges object that specifies the ranges of\n hyperparameters that this tuning job searches over to find the optimal configuration for\n the highest model performance against your chosen objective metric.
" + "smithy.api#documentation": "The ParameterRanges object that specifies the ranges of\n hyperparameters\n that this tuning job searches.
" } }, "TrainingJobEarlyStoppingType": { "target": "com.amazonaws.sagemaker#TrainingJobEarlyStoppingType", "traits": { - "smithy.api#documentation": "Specifies whether to use early stopping for training jobs launched by the\n hyperparameter tuning job. Because the Hyperband
strategy has its own\n advanced internal early stopping mechanism, TrainingJobEarlyStoppingType
\n must be OFF
to use Hyperband
. This parameter can take on one\n of the following values (the default value is OFF
):
Training jobs launched by the hyperparameter tuning job do not use early\n stopping.
\nSageMaker stops training jobs launched by the hyperparameter tuning job when\n they are unlikely to perform better than previously completed training jobs.\n For more information, see Stop Training Jobs Early.
\nSpecifies whether to use early stopping for training jobs launched by the\n hyperparameter tuning job. Because the Hyperband
strategy has its own\n advanced internal early stopping mechanism, TrainingJobEarlyStoppingType
\n must be OFF
to use Hyperband
. This parameter can take on one\n of the following values (the default value is OFF
):
Training jobs launched by the hyperparameter tuning job do not use early\n stopping.
\nSageMaker stops training jobs launched by the hyperparameter tuning job when\n they are unlikely to perform better than previously completed training jobs.\n For more information, see Stop Training Jobs Early.
\nThe tuning job's completion criteria.
" } - }, - "RandomSeed": { - "target": "com.amazonaws.sagemaker#RandomSeed", - "traits": { - "smithy.api#documentation": "A value used to initialize a pseudo-random number generator. Setting a random seed and\n using the same seed later for the same tuning job will allow hyperparameter optimization\n to find more a consistent hyperparameter configuration between the two runs.
" - } } }, "traits": { @@ -24704,20 +24535,20 @@ "ParentHyperParameterTuningJobs": { "target": "com.amazonaws.sagemaker#ParentHyperParameterTuningJobs", "traits": { - "smithy.api#documentation": "An array of hyperparameter tuning jobs that are used as the starting point for the new\n hyperparameter tuning job. For more information about warm starting a hyperparameter\n tuning job, see Using a Previous\n Hyperparameter Tuning Job as a Starting Point.
\nHyperparameter tuning jobs created before October 1, 2018 cannot be used as parent\n jobs for warm start tuning jobs.
", + "smithy.api#documentation": "An array of hyperparameter tuning jobs that are used as the starting point for the new\n hyperparameter tuning job. For more information about warm starting a hyperparameter\n tuning job, see Using a Previous\n Hyperparameter Tuning Job as a Starting Point.
\nHyperparameter tuning jobs created before October 1, 2018 cannot be used as parent\n jobs for warm start tuning jobs.
", "smithy.api#required": {} } }, "WarmStartType": { "target": "com.amazonaws.sagemaker#HyperParameterTuningJobWarmStartType", "traits": { - "smithy.api#documentation": "Specifies one of the following:
\nThe new hyperparameter tuning job uses the same input data and training\n image as the parent tuning jobs. You can change the hyperparameter ranges to\n search and the maximum number of training jobs that the hyperparameter\n tuning job launches. You cannot use a new version of the training algorithm,\n unless the changes in the new version do not affect the algorithm itself.\n For example, changes that improve logging or adding support for a different\n data format are allowed. You can also change hyperparameters from tunable to\n static, and from static to tunable, but the total number of static plus\n tunable hyperparameters must remain the same as it is in all parent jobs.\n The objective metric for the new tuning job must be the same as for all\n parent jobs.
\nThe new hyperparameter tuning job can include input data, hyperparameter\n ranges, maximum number of concurrent training jobs, and maximum number of\n training jobs that are different than those of its parent hyperparameter\n tuning jobs. The training image can also be a different version from the\n version used in the parent hyperparameter tuning job. You can also change\n hyperparameters from tunable to static, and from static to tunable, but the\n total number of static plus tunable hyperparameters must remain the same as\n it is in all parent jobs. The objective metric for the new tuning job must\n be the same as for all parent jobs.
\nSpecifies one of the following:
\nThe new hyperparameter tuning job uses the same input data and training\n image as the parent tuning jobs. You can change the hyperparameter ranges to\n search and the maximum number of training jobs that the hyperparameter\n tuning job launches. You cannot use a new version of the training algorithm,\n unless the changes in the new version do not affect the algorithm itself.\n For example, changes that improve logging or adding support for a different\n data format are allowed. You can also change hyperparameters from tunable to\n static, and from static to tunable, but the total number of static plus\n tunable hyperparameters must remain the same as it is in all parent jobs.\n The objective metric for the new tuning job must be the same as for all\n parent jobs.
\nThe new hyperparameter tuning job can include input data, hyperparameter\n ranges, maximum number of concurrent training jobs, and maximum number of\n training jobs that are different than those of its parent hyperparameter\n tuning jobs. The training image can also be a different version from the\n version used in the parent hyperparameter tuning job. You can also change\n hyperparameters from tunable to static, and from static to tunable, but the\n total number of static plus tunable hyperparameters must remain the same as\n it is in all parent jobs. The objective metric for the new tuning job must\n be the same as for all parent jobs.
\nSpecifies the configuration for a hyperparameter tuning job that uses one or more\n previous hyperparameter tuning jobs as a starting point. The results of previous tuning\n jobs are used to inform which combinations of hyperparameters to search over in the new\n tuning job.
\nAll training jobs launched by the new hyperparameter tuning job are evaluated by using\n the objective metric, and the training job that performs the best is compared to the\n best training jobs from the parent tuning jobs. From these, the training job that\n performs the best as measured by the objective metric is returned as the overall best\n training job.
\nAll training jobs launched by parent hyperparameter tuning jobs and the new\n hyperparameter tuning jobs count against the limit of training jobs for the tuning\n job.
\nSpecifies the configuration for a hyperparameter tuning job that uses one or more\n previous hyperparameter tuning jobs as a starting point. The results of previous tuning\n jobs are used to inform which combinations of hyperparameters to search over in the new\n tuning job.
\nAll training jobs launched by the new hyperparameter tuning job are evaluated by using\n the objective metric, and the training job that performs the best is compared to the\n best training jobs from the parent tuning jobs. From these, the training job that\n performs the best as measured by the objective metric is returned as the overall best\n training job.
\nAll training jobs launched by parent hyperparameter tuning jobs and the new\n hyperparameter tuning jobs count against the limit of training jobs for the tuning\n job.
\nThe volume size in GB for the storage volume to be used in processing hyperparameter\n optimization jobs (optional). These volumes store model artifacts, incremental states\n and optionally, scratch space for training algorithms. Do not provide a value for this\n parameter if a value for InstanceConfigs
is also specified.
Some instance types have a fixed total local storage size. If you select one of these\n instances for training, VolumeSizeInGB
cannot be greater than this total\n size. For a list of instance types with local instance storage and their sizes, see\n instance store volumes.
SageMaker supports only the General Purpose SSD\n (gp2) storage volume type.
\nThe volume size in GB for the storage volume to be used in processing hyperparameter\n optimization jobs (optional). These volumes store model artifacts, incremental states\n and optionally, scratch space for training algorithms. Do not provide a value for this\n parameter if a value for InstanceConfigs
is also specified.
Some instance types have a fixed total local storage size. If you select one of these\n instances for training, VolumeSizeInGB
cannot be greater than this total\n size. For a list of instance types with local instance storage and their sizes, see\n instance store volumes.
SageMaker supports only the General Purpose SSD\n (gp2) storage volume type.
\nA key used by Amazon Web Services Key Management Service to encrypt data on the storage volume\n attached to the compute instances used to run the training job. You can use either of\n the following formats to specify a key.
\nKMS Key ID:
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
Amazon Resource Name (ARN) of a KMS key:
\n\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
Some instances use local storage, which use a hardware module to\n encrypt storage volumes. If you choose one of these instance types, you\n cannot request a VolumeKmsKeyId
. For a list of instance types that use\n local storage, see instance store\n volumes. For more information about Amazon Web Services Key Management Service, see KMS\n encryption for more information.
A key used by Amazon Web Services Key Management Service to encrypt data on the storage volume\n attached to the compute instances used to run the training job. You can use either of\n the following formats to specify a key.
\nKMS Key ID:
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
Amazon Resource Name (ARN) of a KMS key:
\n\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
Some instances use local storage, which uses a hardware module to\n encrypt storage volumes. If you choose one of these instance types, you\n cannot request a VolumeKmsKeyId
. For a list of instance types that use\n local storage, see instance store\n volumes. For more information about Amazon Web Services Key Management Service, see KMS\n encryption.
A list containing the configuration(s) for one or more resources for processing\n hyperparameter jobs. These resources include compute instances and storage volumes to\n use in model training jobs launched by hyperparameter tuning jobs. The\n AllocationStrategy
controls the order in which multiple configurations\n provided in InstanceConfigs
are used.
If you only want to use a single instance configuration inside the\n HyperParameterTuningResourceConfig
API, do not provide a value for\n InstanceConfigs
. Instead, use InstanceType
,\n VolumeSizeInGB
and InstanceCount
. If you use\n InstanceConfigs
, do not provide values for\n InstanceType
, VolumeSizeInGB
or\n InstanceCount
.
A list containing the configuration(s) for one or more resources for processing\n hyperparameter jobs. These resources include compute instances and storage volumes to\n use in model training jobs launched by hyperparameter tuning jobs. The\n AllocationStrategy
controls the order in which multiple configurations\n provided in InstanceConfigs
are used.
If you only want to use a single instance configuration inside the\n HyperParameterTuningResourceConfig
API, do not provide a value for\n InstanceConfigs
. Instead, use InstanceType
,\n VolumeSizeInGB
and InstanceCount
. If you use\n InstanceConfigs
, do not provide values for\n InstanceType
, VolumeSizeInGB
or\n InstanceCount
.
The configuration of resources, including compute instances and storage volumes for\n use in training jobs launched by hyperparameter tuning jobs.\n HyperParameterTuningResourceConfig
is similar to\n ResourceConfig
, but has the additional InstanceConfigs
and\n AllocationStrategy
fields to allow for flexible instance management.\n Specify one or more instance types, count, and the allocation strategy for instance\n selection.
\n HyperParameterTuningResourceConfig
supports the capabilities of\n ResourceConfig
with the exception of\n KeepAlivePeriodInSeconds
. Hyperparameter tuning jobs use warm pools\n by default, which reuse clusters between training jobs.
The configuration of resources, including compute instances and storage volumes for\n use in training jobs launched by hyperparameter tuning jobs. Specify one or more\n instance types and counts, and the allocation strategy for instance selection.
\n\n HyperParameterTuningResourceConfig
supports all of the capabilities\n of ResourceConfig with added functionality for flexible instance management.
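[Editor note] For orientation, a hedged boto3 fragment showing how this shape is typically populated inside the TrainingJobDefinition of create_hyper_parameter_tuning_job; instance types, sizes, and the key ID are placeholders. Per the documentation above, use either InstanceConfigs or the top-level InstanceType/InstanceCount/VolumeSizeInGB fields, not both.

```python
# HyperParameterTuningResourceConfig inside the TrainingJobDefinition.
hyperparameter_tuning_resource_config = {
    "AllocationStrategy": "Prioritized",  # order in which InstanceConfigs are used
    "InstanceConfigs": [
        {"InstanceType": "ml.m5.xlarge", "InstanceCount": 1, "VolumeSizeInGB": 10},
        {"InstanceType": "ml.c5.2xlarge", "InstanceCount": 1, "VolumeSizeInGB": 10},
    ],
    # Optional: encrypt the attached storage volumes with a customer managed key.
    "VolumeKmsKeyId": "1234abcd-12ab-34cd-56ef-1234567890ab",
}
```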
The ARN of the image.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the image.
", "smithy.api#required": {} } }, @@ -24940,7 +24771,7 @@ "RepositoryAccessMode": { "target": "com.amazonaws.sagemaker#RepositoryAccessMode", "traits": { - "smithy.api#documentation": "Set this to one of the following values:
\n\n Platform
- The model image is hosted in Amazon ECR.
\n Vpc
- The model image is hosted in a private Docker registry in\n your VPC.
Set this to one of the following values:
\n\n Platform
- The model image is hosted in Amazon ECR.
\n Vpc
- The model image is hosted in a private Docker registry in\n your VPC.
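[Editor note] A hedged boto3 sketch of the Vpc access mode described above; the image URI, role, credentials-provider ARN, bucket, and VPC IDs are placeholders.

```python
import boto3

sm = boto3.client("sagemaker")

sm.create_model(
    ModelName="private-registry-model",
    ExecutionRoleArn="arn:aws:iam::111122223333:role/SageMakerExecutionRole",
    PrimaryContainer={
        "Image": "registry.example.com/my-team/my-model:latest",  # private registry in the VPC
        "ImageConfig": {
            "RepositoryAccessMode": "Vpc",
            # Only needed when the private registry requires authentication.
            "RepositoryAuthConfig": {
                "RepositoryCredentialsProviderArn": "arn:aws:lambda:us-west-2:111122223333:function:creds-provider"
            },
        },
        "ModelDataUrl": "s3://my-bucket/model.tar.gz",
    },
    # Vpc mode needs network access to the registry from inside your VPC.
    VpcConfig={
        "SecurityGroupIds": ["sg-0123456789abcdef0"],
        "Subnets": ["subnet-0123456789abcdef0"],
    },
)
```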
The ARN of the image the version is based on.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the image the version is based on.
", "smithy.api#required": {} } }, @@ -25418,7 +25249,7 @@ "Mode": { "target": "com.amazonaws.sagemaker#InferenceExecutionMode", "traits": { - "smithy.api#documentation": "How containers in a multi-container are run. The following values are valid.
\n\n SERIAL
- Containers run as a serial pipeline.
\n DIRECT
- Only the individual container that you specify is\n run.
How containers in a multi-container are run. The following values are valid.
\n\n SERIAL
- Containers run as a serial pipeline.
\n DIRECT
- Only the individual container that you specify is\n run.
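[Editor note] A hedged sketch of a multi-container model using direct invocation; names and image URIs are placeholders, and the Mode string follows the Serial/Direct enum casing in the public API reference rather than the display names above.

```python
import boto3

sm = boto3.client("sagemaker")

sm.create_model(
    ModelName="multi-container-model",
    ExecutionRoleArn="arn:aws:iam::111122223333:role/SageMakerExecutionRole",
    Containers=[
        {"ContainerHostname": "preprocess", "Image": "111122223333.dkr.ecr.us-west-2.amazonaws.com/preprocess:latest"},
        {"ContainerHostname": "predict", "Image": "111122223333.dkr.ecr.us-west-2.amazonaws.com/predict:latest"},
    ],
    # "Direct" lets you invoke a specific container; "Serial" runs them as a pipeline.
    InferenceExecutionConfig={"Mode": "Direct"},
)
```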
The Amazon S3 bucket where the inference request and response data is stored.
", + "smithy.api#documentation": "The Amazon S3 bucket where the inference experiment data is stored.
", "smithy.api#required": {} } }, "KmsKey": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "\n The Amazon Web Services Key Management Service key that Amazon SageMaker uses to encrypt captured data at rest using Amazon S3\n server-side encryption.\n
" + "smithy.api#documentation": "\n The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service key that Amazon SageMaker uses to encrypt\n captured data when uploading to Amazon S3.\n
" } }, "ContentType": { @@ -25475,7 +25306,7 @@ } }, "traits": { - "smithy.api#documentation": "The Amazon S3 location and configuration for storing inference request and response data.
" + "smithy.api#documentation": "The Amazon S3 location and configuration for storing inference experiment data.
" } }, "com.amazonaws.sagemaker#InferenceExperimentDescription": { @@ -25669,7 +25500,7 @@ "RoleArn": { "target": "com.amazonaws.sagemaker#RoleArn", "traits": { - "smithy.api#documentation": "\n The ARN of the IAM role that Amazon SageMaker can assume to access model artifacts and container images, and manage\n Amazon SageMaker Inference endpoints for model deployment.\n
" + "smithy.api#documentation": "\n The ARN of the IAM role that Amazon SageMaker can assume to access model artifacts and container\n images.\n
" } } }, @@ -25898,13 +25729,13 @@ "SupportedTransformInstanceTypes": { "target": "com.amazonaws.sagemaker#TransformInstanceTypes", "traits": { - "smithy.api#documentation": "A list of the instance types on which a transformation job can be run or on which an\n endpoint can be deployed.
\nThis parameter is required for unversioned models, and optional for versioned\n models.
" + "smithy.api#documentation": "A list of the instance types on which a transformation job can be run or on which an\n endpoint can be deployed.
\nThis parameter is required for unversioned models, and optional for versioned\n models.
" } }, "SupportedRealtimeInferenceInstanceTypes": { "target": "com.amazonaws.sagemaker#RealtimeInferenceInstanceTypes", "traits": { - "smithy.api#documentation": "A list of the instance types that are used to generate inferences in real-time.
\nThis parameter is required for unversioned models, and optional for versioned\n models.
" + "smithy.api#documentation": "A list of the instance types that are used to generate inferences in real-time.
\nThis parameter is required for unversioned models, and optional for versioned\n models.
" } }, "SupportedContentTypes": { @@ -25965,7 +25796,7 @@ "DataInputConfig": { "target": "com.amazonaws.sagemaker#DataInputConfig", "traits": { - "smithy.api#documentation": "Specifies the name and shape of the expected data inputs for your trained model with a\n JSON dictionary form. The data inputs are InputConfig$Framework\n specific.
\n\n TensorFlow
: You must specify the name and shape (NHWC format) of\n the expected data inputs using a dictionary format for your trained model. The\n dictionary formats required for the console and CLI are different.
Examples for one input:
\nIf using the console,\n {\"input\":[1,1024,1024,3]}
\n
If using the CLI,\n {\\\"input\\\":[1,1024,1024,3]}
\n
Examples for two inputs:
\nIf using the console, {\"data1\": [1,28,28,1],\n \"data2\":[1,28,28,1]}
\n
If using the CLI, {\\\"data1\\\": [1,28,28,1],\n \\\"data2\\\":[1,28,28,1]}
\n
\n KERAS
: You must specify the name and shape (NCHW format) of\n expected data inputs using a dictionary format for your trained model. Note that\n while Keras model artifacts should be uploaded in NHWC (channel-last) format,\n DataInputConfig
should be specified in NCHW (channel-first)\n format. The dictionary formats required for the console and CLI are\n different.
Examples for one input:
\nIf using the console,\n {\"input_1\":[1,3,224,224]}
\n
If using the CLI,\n {\\\"input_1\\\":[1,3,224,224]}
\n
Examples for two inputs:
\nIf using the console, {\"input_1\": [1,3,224,224],\n \"input_2\":[1,3,224,224]}
\n
If using the CLI, {\\\"input_1\\\": [1,3,224,224],\n \\\"input_2\\\":[1,3,224,224]}
\n
\n MXNET/ONNX/DARKNET
: You must specify the name and shape (NCHW format) of\n the expected data inputs in order using a dictionary format for your trained\n model. The dictionary formats required for the console and CLI are\n different.
Examples for one input:
\nIf using the console,\n {\"data\":[1,3,1024,1024]}
\n
If using the CLI,\n {\\\"data\\\":[1,3,1024,1024]}
\n
Examples for two inputs:
\nIf using the console, {\"var1\": [1,1,28,28],\n \"var2\":[1,1,28,28]}
\n
If using the CLI, {\\\"var1\\\": [1,1,28,28],\n \\\"var2\\\":[1,1,28,28]}
\n
\n PyTorch
: You can either specify the name and shape (NCHW format)\n of expected data inputs in order using a dictionary format for your trained\n model or you can specify the shape only using a list format. The dictionary\n formats required for the console and CLI are different. The list formats for the\n console and CLI are the same.
Examples for one input in dictionary format:
\nIf using the console,\n {\"input0\":[1,3,224,224]}
\n
If using the CLI,\n {\\\"input0\\\":[1,3,224,224]}
\n
Example for one input in list format:\n [[1,3,224,224]]
\n
Examples for two inputs in dictionary format:
\nIf using the console, {\"input0\":[1,3,224,224],\n \"input1\":[1,3,224,224]}
\n
If using the CLI, {\\\"input0\\\":[1,3,224,224],\n \\\"input1\\\":[1,3,224,224]}
\n
Example for two inputs in list format: [[1,3,224,224],\n [1,3,224,224]]
\n
\n XGBOOST
: input data name and shape are not needed.
\n DataInputConfig
supports the following parameters for CoreML
\n OutputConfig$TargetDevice (ML Model format):
\n shape
: Input shape, for example {\"input_1\": {\"shape\": [1,224,224,3]}}
.\n In addition to static input shapes, CoreML converter supports Flexible input shapes:
Range Dimension. You can use the Range Dimension feature if you know the input shape\n will be within some specific interval in that dimension,\n for example: {\"input_1\": {\"shape\": [\"1..10\", 224, 224, 3]}}
\n
Enumerated shapes. Sometimes, the models are trained to work only on a select\n set of inputs. You can enumerate all supported input shapes,\n for example: {\"input_1\": {\"shape\": [[1, 224, 224, 3], [1, 160, 160, 3]]}}
\n
\n default_shape
: Default input shape. You can set a default shape during\n conversion for both Range Dimension and Enumerated Shapes. For example\n {\"input_1\": {\"shape\": [\"1..10\", 224, 224, 3], \"default_shape\": [1, 224, 224, 3]}}
\n
\n type
: Input type. Allowed values: Image
and Tensor
.\n By default, the converter generates an ML Model with inputs of type Tensor (MultiArray).\n User can set input type to be Image. Image input type requires additional input parameters\n such as bias
and scale
.
\n bias
: If the input type is an Image, you need to provide the bias vector.
\n scale
: If the input type is an Image, you need to provide a scale factor.
CoreML ClassifierConfig
parameters can be specified using\n OutputConfig$CompilerOptions. CoreML converter supports Tensorflow and PyTorch models.\n CoreML conversion examples:
Tensor type input:
\n\n \"DataInputConfig\": {\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\":\n [1,224,224,3]}}
\n
Tensor type input without input name (PyTorch):
\n\n \"DataInputConfig\": [{\"shape\": [[1,3,224,224], [1,3,160,160]], \"default_shape\":\n [1,3,224,224]}]
\n
Image type input:
\n\n \"DataInputConfig\": {\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\":\n [1,224,224,3], \"type\": \"Image\", \"bias\": [-1,-1,-1], \"scale\": 0.007843137255}}
\n
\n \"CompilerOptions\": {\"class_labels\": \"imagenet_labels_1000.txt\"}
\n
Image type input without input name (PyTorch):
\n\n \"DataInputConfig\": [{\"shape\": [[1,3,224,224], [1,3,160,160]], \"default_shape\":\n [1,3,224,224], \"type\": \"Image\", \"bias\": [-1,-1,-1], \"scale\": 0.007843137255}]
\n
\n \"CompilerOptions\": {\"class_labels\": \"imagenet_labels_1000.txt\"}
\n
Depending on the model format, DataInputConfig
requires the following parameters for \n ml_eia2
\n OutputConfig:TargetDevice.
For TensorFlow models saved in the SavedModel format, specify the input names \n from signature_def_key
and the input model shapes for DataInputConfig
. \n Specify the signature_def_key
in \n \n OutputConfig:CompilerOptions
\n if \n the model does not use TensorFlow's default signature def key. For example:
\n \"DataInputConfig\": {\"inputs\": [1, 224, 224, 3]}
\n
\n \"CompilerOptions\": {\"signature_def_key\": \"serving_custom\"}
\n
For TensorFlow models saved as a frozen graph, specify the input tensor names and shapes \n in DataInputConfig
and the output tensor names for output_names
in \n \n OutputConfig:CompilerOptions
\n .\n For example:
\n \"DataInputConfig\": {\"input_tensor:0\": [1, 224, 224, 3]}
\n
\n \"CompilerOptions\": {\"output_names\": [\"output_tensor:0\"]}
\n
Specifies the name and shape of the expected data inputs for your trained model with a\n JSON dictionary form. The data inputs are InputConfig$Framework\n specific.
\n\n TensorFlow
: You must specify the name and shape (NHWC format) of\n the expected data inputs using a dictionary format for your trained model. The\n dictionary formats required for the console and CLI are different.
Examples for one input:
\nIf using the console,\n {\"input\":[1,1024,1024,3]}
\n
If using the CLI,\n {\\\"input\\\":[1,1024,1024,3]}
\n
Examples for two inputs:
\nIf using the console, {\"data1\": [1,28,28,1],\n \"data2\":[1,28,28,1]}
\n
If using the CLI, {\\\"data1\\\": [1,28,28,1],\n \\\"data2\\\":[1,28,28,1]}
\n
\n KERAS
: You must specify the name and shape (NCHW format) of\n expected data inputs using a dictionary format for your trained model. Note that\n while Keras model artifacts should be uploaded in NHWC (channel-last) format,\n DataInputConfig
should be specified in NCHW (channel-first)\n format. The dictionary formats required for the console and CLI are\n different.
Examples for one input:
\nIf using the console,\n {\"input_1\":[1,3,224,224]}
\n
If using the CLI,\n {\\\"input_1\\\":[1,3,224,224]}
\n
Examples for two inputs:
\nIf using the console, {\"input_1\": [1,3,224,224],\n \"input_2\":[1,3,224,224]}
\n
If using the CLI, {\\\"input_1\\\": [1,3,224,224],\n \\\"input_2\\\":[1,3,224,224]}
\n
\n MXNET/ONNX/DARKNET
: You must specify the name and shape (NCHW format) of\n the expected data inputs in order using a dictionary format for your trained\n model. The dictionary formats required for the console and CLI are\n different.
Examples for one input:
\nIf using the console,\n {\"data\":[1,3,1024,1024]}
\n
If using the CLI,\n {\\\"data\\\":[1,3,1024,1024]}
\n
Examples for two inputs:
\nIf using the console, {\"var1\": [1,1,28,28],\n \"var2\":[1,1,28,28]}
\n
If using the CLI, {\\\"var1\\\": [1,1,28,28],\n \\\"var2\\\":[1,1,28,28]}
\n
\n PyTorch
: You can either specify the name and shape (NCHW format)\n of expected data inputs in order using a dictionary format for your trained\n model or you can specify the shape only using a list format. The dictionary\n formats required for the console and CLI are different. The list formats for the\n console and CLI are the same.
Examples for one input in dictionary format:
\nIf using the console,\n {\"input0\":[1,3,224,224]}
\n
If using the CLI,\n {\\\"input0\\\":[1,3,224,224]}
\n
Example for one input in list format:\n [[1,3,224,224]]
\n
Examples for two inputs in dictionary format:
\nIf using the console, {\"input0\":[1,3,224,224],\n \"input1\":[1,3,224,224]}
\n
If using the CLI, {\\\"input0\\\":[1,3,224,224],\n \\\"input1\\\":[1,3,224,224]}
\n
Example for two inputs in list format: [[1,3,224,224],\n [1,3,224,224]]
\n
\n XGBOOST
: input data name and shape are not needed.
\n DataInputConfig
supports the following parameters for CoreML
\n OutputConfig$TargetDevice (ML Model format):
\n shape
: Input shape, for example {\"input_1\": {\"shape\": [1,224,224,3]}}
.\n In addition to static input shapes, CoreML converter supports Flexible input shapes:
Range Dimension. You can use the Range Dimension feature if you know the input shape\n will be within some specific interval in that dimension,\n for example: {\"input_1\": {\"shape\": [\"1..10\", 224, 224, 3]}}
\n
Enumerated shapes. Sometimes, the models are trained to work only on a select\n set of inputs. You can enumerate all supported input shapes,\n for example: {\"input_1\": {\"shape\": [[1, 224, 224, 3], [1, 160, 160, 3]]}}
\n
\n default_shape
: Default input shape. You can set a default shape during\n conversion for both Range Dimension and Enumerated Shapes. For example\n {\"input_1\": {\"shape\": [\"1..10\", 224, 224, 3], \"default_shape\": [1, 224, 224, 3]}}
\n
\n type
: Input type. Allowed values: Image
and Tensor
.\n By default, the converter generates an ML Model with inputs of type Tensor (MultiArray).\n User can set input type to be Image. Image input type requires additional input parameters\n such as bias
and scale
.
\n bias
: If the input type is an Image, you need to provide the bias vector.
\n scale
: If the input type is an Image, you need to provide a scale factor.
CoreML ClassifierConfig
parameters can be specified using\n OutputConfig$CompilerOptions. CoreML converter supports Tensorflow and PyTorch models.\n CoreML conversion examples:
Tensor type input:
\n\n \"DataInputConfig\": {\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\":\n [1,224,224,3]}}
\n
Tensor type input without input name (PyTorch):
\n\n \"DataInputConfig\": [{\"shape\": [[1,3,224,224], [1,3,160,160]], \"default_shape\":\n [1,3,224,224]}]
\n
Image type input:
\n\n \"DataInputConfig\": {\"input_1\": {\"shape\": [[1,224,224,3], [1,160,160,3]], \"default_shape\":\n [1,224,224,3], \"type\": \"Image\", \"bias\": [-1,-1,-1], \"scale\": 0.007843137255}}
\n
\n \"CompilerOptions\": {\"class_labels\": \"imagenet_labels_1000.txt\"}
\n
Image type input without input name (PyTorch):
\n\n \"DataInputConfig\": [{\"shape\": [[1,3,224,224], [1,3,160,160]], \"default_shape\":\n [1,3,224,224], \"type\": \"Image\", \"bias\": [-1,-1,-1], \"scale\": 0.007843137255}]
\n
\n \"CompilerOptions\": {\"class_labels\": \"imagenet_labels_1000.txt\"}
\n
Depending on the model format, DataInputConfig
requires the following parameters for \n ml_eia2
\n OutputConfig:TargetDevice.
For TensorFlow models saved in the SavedModel format, specify the input names \n from signature_def_key
and the input model shapes for DataInputConfig
. \n Specify the signature_def_key
in \n \n OutputConfig:CompilerOptions
\n if \n the model does not use TensorFlow's default signature def key. For example:
\n \"DataInputConfig\": {\"inputs\": [1, 224, 224, 3]}
\n
\n \"CompilerOptions\": {\"signature_def_key\": \"serving_custom\"}
\n
For TensorFlow models saved as a frozen graph, specify the input tensor names and shapes \n in DataInputConfig
and the output tensor names for output_names
in \n \n OutputConfig:CompilerOptions
\n .\n For example:
\n \"DataInputConfig\": {\"input_tensor:0\": [1, 224, 224, 3]}
\n
\n \"CompilerOptions\": {\"output_names\": [\"output_tensor:0\"]}
\n
Specifies the framework version to use. This API field is only supported for the MXNet,\n\t PyTorch, TensorFlow and TensorFlow Lite frameworks.
\nFor information about framework versions supported for cloud targets and edge devices, see \n Cloud Supported Instance Types and Frameworks and \n Edge Supported Frameworks.
" + "smithy.api#documentation": "Specifies the framework version to use. This API field is only supported for the MXNet,\n\t PyTorch, TensorFlow and TensorFlow Lite frameworks.
\nFor information about framework versions supported for cloud targets and edge devices, see \n Cloud Supported Instance Types and Frameworks and \n Edge Supported Frameworks.
" } } }, @@ -26558,7 +26389,7 @@ "ScalingType": { "target": "com.amazonaws.sagemaker#HyperParameterScalingType", "traits": { - "smithy.api#documentation": "The scale that hyperparameter tuning uses to search the hyperparameter range. For\n information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:
\nSageMaker hyperparameter tuning chooses the best scale for the\n hyperparameter.
\nHyperparameter tuning searches the values in the hyperparameter range by\n using a linear scale.
\nHyperparameter tuning searches the values in the hyperparameter range by\n using a logarithmic scale.
\nLogarithmic scaling works only for ranges that have only values greater\n than 0.
\nThe scale that hyperparameter tuning uses to search the hyperparameter range. For\n information about choosing a hyperparameter scale, see Hyperparameter Scaling. One of the following values:
\nSageMaker hyperparameter tuning chooses the best scale for the\n hyperparameter.
\nHyperparameter tuning searches the values in the hyperparameter range by\n using a linear scale.
\nHyperparameter tuning searches the values in the hyperparameter range by\n using a logarithmic scale.
\nLogarithmic scaling works only for ranges that have only values greater\n than 0.
\n The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the JupyterServerApp. If you use this parameter, the DefaultResourceSpec
parameter is also required.
To remove a Lifecycle Config, you must set LifecycleConfigArns
to an empty list.
The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the JupyterServerApp. If you use this parameter, the DefaultResourceSpec
parameter is also required.
To remove a Lifecycle Config, you must set LifecycleConfigArns
to an empty list.
The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the the user profile or domain.
\nTo remove a Lifecycle Config, you must set LifecycleConfigArns
to an empty list.
The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the the user profile or domain.
\nTo remove a Lifecycle Config, you must set LifecycleConfigArns
to an empty list.
Specifies the Amazon Resource Name (ARN) of the algorithm used for auto-labeling. You\n must select one of the following ARNs:
\n\n Image classification\n
\n\n arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/image-classification
\n
\n Text classification\n
\n\n arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/text-classification
\n
\n Object detection\n
\n\n arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/object-detection
\n
\n Semantic Segmentation\n
\n\n arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/semantic-segmentation
\n
Specifies the Amazon Resource Name (ARN) of the algorithm used for auto-labeling. You\n must select one of the following ARNs:
\n\n Image classification\n
\n\n arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/image-classification
\n
\n Text classification\n
\n\n arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/text-classification
\n
\n Object detection\n
\n\n arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/object-detection
\n
\n Semantic Segmentation\n
\n\n arn:aws:sagemaker:region:027400017018:labeling-job-algorithm-specification/semantic-segmentation
\n
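[Editor note] A hedged fragment showing where one of the ARNs listed above goes in a create_labeling_job request; the region is a placeholder, while the account number is the fixed one from the list.

```python
# LabelingJobAlgorithmsConfig argument of create_labeling_job, enabling automated
# data labeling for an image classification job.
labeling_job_algorithms_config = {
    "LabelingJobAlgorithmSpecificationArn": (
        "arn:aws:sagemaker:us-west-2:027400017018:"
        "labeling-job-algorithm-specification/image-classification"
    )
}
```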
Provides information about the location of input data.
\nYou must specify at least one of the following: S3DataSource
or SnsDataSource
.
Use SnsDataSource
to specify an SNS input topic\n for a streaming labeling job. If you do not specify \n and SNS input topic ARN, Ground Truth will create a one-time labeling job.
Use S3DataSource
to specify an input \n manifest file for both streaming and one-time labeling jobs.\n Adding an S3DataSource
is optional if you use SnsDataSource
to create a streaming labeling job.
Provides information about the location of input data.
\n \nYou must specify at least one of the following: S3DataSource
or SnsDataSource
.
Use SnsDataSource
to specify an SNS input topic\n for a streaming labeling job. If you do not specify \n and SNS input topic ARN, Ground Truth will create a one-time labeling job.
Use S3DataSource
to specify an input \n manifest file for both streaming and one-time labeling jobs.\n Adding an S3DataSource
is optional if you use SnsDataSource
to create a streaming labeling job.
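[Editor note] A hedged boto3 fragment for the two data-source variants described above; the bucket and topic ARN are placeholders.

```python
# InputConfig for create_labeling_job. For a one-time job, point S3DataSource at a
# manifest; for a streaming job, provide SnsDataSource with the input topic instead
# of (or in addition to) the manifest.
labeling_job_input_config = {
    "DataSource": {
        "S3DataSource": {"ManifestS3Uri": "s3://my-bucket/manifests/input.manifest"},
        # "SnsDataSource": {"SnsTopicArn": "arn:aws:sns:us-west-2:111122223333:labeling-input"},
    }
}
```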
The Amazon Web Services Key Management Service ID of the key used to encrypt the output data, if any.
\nIf you provide your own KMS key ID, you must add the required permissions to your KMS\n key described in Encrypt Output Data and Storage Volume with Amazon Web Services KMS.
\nIf you don't provide a KMS key ID, Amazon SageMaker uses the default Amazon Web Services KMS key for Amazon S3 for your\n role's account to encrypt your output data.
\nIf you use a bucket policy with an s3:PutObject
permission that only\n allows objects with server-side encryption, set the condition key of\n s3:x-amz-server-side-encryption
to \"aws:kms\"
. For more\n information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer\n Guide.\n
The Amazon Web Services Key Management Service ID of the key used to encrypt the output data, if any.
\nIf you provide your own KMS key ID, you must add the required permissions to your KMS\n key described in Encrypt Output Data and Storage Volume with Amazon Web Services KMS.
\nIf you don't provide a KMS key ID, Amazon SageMaker uses the default Amazon Web Services KMS key for Amazon S3 for your\n role's account to encrypt your output data.
\nIf you use a bucket policy with an s3:PutObject
permission that only\n allows objects with server-side encryption, set the condition key of\n s3:x-amz-server-side-encryption
to \"aws:kms\"
. For more\n information, see KMS-Managed Encryption Keys in the Amazon Simple Storage Service Developer\n Guide.\n
An Amazon Simple Notification Service (Amazon SNS) output topic ARN. Provide a SnsTopicArn
if you want to\n do real time chaining to another streaming job and receive an Amazon SNS notifications each\n time a data object is submitted by a worker.
If you provide an SnsTopicArn
in OutputConfig
, when workers\n complete labeling tasks, Ground Truth will send labeling task output data to the SNS output\n topic you specify here.
To learn more, see Receive Output Data from a Streaming Labeling\n Job.
" + "smithy.api#documentation": "An Amazon Simple Notification Service (Amazon SNS) output topic ARN. Provide a SnsTopicArn
if you want to\n do real time chaining to another streaming job and receive an Amazon SNS notifications each\n time a data object is submitted by a worker.
If you provide an SnsTopicArn
in OutputConfig
, when workers\n complete labeling tasks, Ground Truth will send labeling task output data to the SNS output\n topic you specify here.
To learn more, see Receive Output Data from a Streaming Labeling\n Job.
" } } }, @@ -27191,7 +26999,7 @@ "VolumeKmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on the storage volume\n attached to the ML compute instance(s) that run the training and inference jobs used for\n automated data labeling.
\nYou can only specify a VolumeKmsKeyId
when you create a labeling job with\n automated data labeling enabled using the API operation CreateLabelingJob
.\n You cannot specify an Amazon Web Services KMS key to encrypt the storage volume used for\n automated data labeling model training and inference when you create a labeling job\n using the console. To learn more, see Output Data and Storage Volume\n Encryption.
The VolumeKmsKeyId
can be any of the following formats:
KMS Key ID
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
Amazon Resource Name (ARN) of a KMS Key
\n\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on the storage volume\n attached to the ML compute instance(s) that run the training and inference jobs used for\n automated data labeling.
\nYou can only specify a VolumeKmsKeyId
when you create a labeling job with\n automated data labeling enabled using the API operation CreateLabelingJob
.\n You cannot specify an Amazon Web Services KMS key to encrypt the storage volume used for\n automated data labeling model training and inference when you create a labeling job\n using the console. To learn more, see Output Data and Storage Volume\n Encryption.
The VolumeKmsKeyId
can be any of the following formats:
KMS Key ID
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
Amazon Resource Name (ARN) of a KMS Key
\n\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
The Amazon S3 location of the manifest file that describes the input data objects.
\nThe input manifest file referenced in ManifestS3Uri
must contain one of\n the following keys: source-ref
or source
. The value of the\n keys are interpreted as follows:
\n source-ref
: The source of the object is the Amazon S3 object\n specified in the value. Use this value when the object is a binary object, such\n as an image.
\n source
: The source of the object is the value. Use this\n value when the object is a text value.
If you are a new user of Ground Truth, it is recommended you review Use an Input Manifest File in the Amazon SageMaker Developer Guide to learn how to\n create an input manifest file.
", + "smithy.api#documentation": "The Amazon S3 location of the manifest file that describes the input data objects.
\nThe input manifest file referenced in ManifestS3Uri
must contain one of\n the following keys: source-ref
or source
. The value of the\n keys are interpreted as follows:
\n source-ref
: The source of the object is the Amazon S3 object\n specified in the value. Use this value when the object is a binary object, such\n as an image.
\n source
: The source of the object is the value. Use this\n value when the object is a text value.
If you are a new user of Ground Truth, it is recommended you review Use an Input Manifest File in the Amazon SageMaker Developer Guide to learn how to\n create an input manifest file.
", "smithy.api#required": {} } } @@ -27290,7 +27098,7 @@ } }, "traits": { - "smithy.api#documentation": "A set of conditions for stopping a labeling job. If any of the conditions are met, the\n job is automatically stopped. You can use these conditions to control the cost of data\n labeling.
\nLabeling jobs fail after 30 days with an appropriate client error message.
\nA set of conditions for stopping a labeling job. If any of the conditions are met, the\n job is automatically stopped. You can use these conditions to control the cost of data\n labeling.
\nLabeling jobs fail after 30 days with an appropriate client error message.
\nLists the aliases of a specified image or image version.
", - "smithy.api#paginated": { - "inputToken": "NextToken", - "outputToken": "NextToken", - "items": "SageMakerImageVersionAliases", - "pageSize": "MaxResults" - } - } - }, - "com.amazonaws.sagemaker#ListAliasesRequest": { - "type": "structure", - "members": { - "ImageName": { - "target": "com.amazonaws.sagemaker#ImageName", - "traits": { - "smithy.api#documentation": "The name of the image.
", - "smithy.api#required": {} - } - }, - "Alias": { - "target": "com.amazonaws.sagemaker#SageMakerImageVersionAlias", - "traits": { - "smithy.api#documentation": "The alias of the image version.
" - } - }, - "Version": { - "target": "com.amazonaws.sagemaker#ImageVersionNumber", - "traits": { - "smithy.api#documentation": "The version of the image. If image version is not specified, the aliases of all versions of the image are listed.
" - } - }, - "MaxResults": { - "target": "com.amazonaws.sagemaker#MaxResults", - "traits": { - "smithy.api#documentation": "The maximum number of aliases to return.
" - } - }, - "NextToken": { - "target": "com.amazonaws.sagemaker#NextToken", - "traits": { - "smithy.api#documentation": "If the previous call to ListAliases
didn't return the full set of\n aliases, the call returns a token for retrieving the next set of aliases.
A list of SageMaker image version aliases.
" - } - }, - "NextToken": { - "target": "com.amazonaws.sagemaker#NextToken", - "traits": { - "smithy.api#documentation": "A token for getting the next set of aliases, if more aliases exist.
" - } - } - } - }, "com.amazonaws.sagemaker#ListAppImageConfigs": { "type": "operation", "input": { @@ -27978,13 +27710,13 @@ "UserProfileNameEquals": { "target": "com.amazonaws.sagemaker#UserProfileName", "traits": { - "smithy.api#documentation": "A parameter to search by user profile name. If SpaceNameEquals
is set, then this value cannot be set.
A parameter to search by user profile name.
" } }, "SpaceNameEquals": { "target": "com.amazonaws.sagemaker#SpaceName", "traits": { - "smithy.api#documentation": "A parameter to search by space name. If UserProfileNameEquals
is set, then this value cannot be set.
A parameter to search by space name.
" } } } @@ -28485,7 +28217,7 @@ "CodeRepositorySummaryList": { "target": "com.amazonaws.sagemaker#CodeRepositorySummaryList", "traits": { - "smithy.api#documentation": "Gets a list of summaries of the Git repositories. Each summary specifies the following\n values for the repository:
\nName
\nAmazon Resource Name (ARN)
\nCreation time
\nLast modified time
\nConfiguration information, including the URL location of the repository and\n the ARN of the Amazon Web Services Secrets Manager secret that contains the\n credentials used to access the repository.
\nGets a list of summaries of the Git repositories. Each summary specifies the following\n values for the repository:
\nName
\nAmazon Resource Name (ARN)
\nCreation time
\nLast modified time
\nConfiguration information, including the URL location of the repository and\n the ARN of the Amazon Web Services Secrets Manager secret that contains the\n credentials used to access the repository.
\nLists model compilation jobs that satisfy various filters.
\nTo create a model compilation job, use CreateCompilationJob. To get\n information about a particular model compilation job you have created, use DescribeCompilationJob.
", + "smithy.api#documentation": "Lists model compilation jobs that satisfy various filters.
\nTo create a model compilation job, use CreateCompilationJob. To get\n information about a particular model compilation job you have created, use DescribeCompilationJob.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -31672,7 +31404,7 @@ "ModelPackageType": { "target": "com.amazonaws.sagemaker#ModelPackageType", "traits": { - "smithy.api#documentation": "A filter that returns only the model packages of the specified type. This can be one\n of the following values.
\n\n UNVERSIONED
- List only unversioined models. \n This is the default value if no ModelPackageType
is specified.
\n VERSIONED
- List only versioned models.
\n BOTH
- List both versioned and unversioned models.
A filter that returns only the model packages of the specified type. This can be one\n of the following values.
\n\n UNVERSIONED
- List only unversioined models. \n This is the default value if no ModelPackageType
is specified.
\n VERSIONED
- List only versioned models.
\n BOTH
- List both versioned and unversioned models.
If the previous call to the ListNotebookInstances
is truncated, the\n response includes a NextToken
. You can use this token in your subsequent\n ListNotebookInstances
request to fetch the next set of notebook\n instances.
You might specify a filter or a sort order in your request. When response is\n truncated, you must use the same values for the filer and sort order in the next\n request.
\n If the previous call to the ListNotebookInstances
is truncated, the\n response includes a NextToken
. You can use this token in your subsequent\n ListNotebookInstances
request to fetch the next set of notebook\n instances.
You might specify a filter or a sort order in your request. When response is\n truncated, you must use the same values for the filer and sort order in the next\n request.
\nLists training jobs.
\nWhen StatusEquals
and MaxResults
are set at the same\n time, the MaxResults
number of training jobs are first retrieved\n ignoring the StatusEquals
parameter and then they are filtered by the\n StatusEquals
parameter, which is returned as a response.
For example, if ListTrainingJobs
is invoked with the following\n parameters:
\n { ... MaxResults: 100, StatusEquals: InProgress ... }
\n
First, 100 trainings jobs with any status, including those other than\n InProgress
, are selected (sorted according to the creation time,\n from the most current to the oldest). Next, those with a status of\n InProgress
are returned.
You can quickly test the API using the following Amazon Web Services CLI\n code.
\n\n aws sagemaker list-training-jobs --max-results 100 --status-equals\n InProgress
\n
Lists training jobs.
\nWhen StatusEquals
and MaxResults
are set at the same\n time, the MaxResults
number of training jobs are first retrieved\n ignoring the StatusEquals
parameter and then they are filtered by the\n StatusEquals
parameter, which is returned as a response.
For example, if ListTrainingJobs
is invoked with the following\n parameters:
\n { ... MaxResults: 100, StatusEquals: InProgress ... }
\n
First, 100 trainings jobs with any status, including those other than\n InProgress
, are selected (sorted according to the creation time,\n from the most current to the oldest). Next, those with a status of\n InProgress
are returned.
You can quickly test the API using the following Amazon Web Services CLI\n code.
\n\n aws sagemaker list-training-jobs --max-results 100 --status-equals\n InProgress
\n
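[Editor note] The same call expressed with boto3 instead of the CLI; nothing here is specific to this model file.

```python
import boto3

sm = boto3.client("sagemaker")

# Mirrors: aws sagemaker list-training-jobs --max-results 100 --status-equals InProgress
resp = sm.list_training_jobs(MaxResults=100, StatusEquals="InProgress")
for job in resp["TrainingJobSummaries"]:
    print(job["TrainingJobName"], job["TrainingJobStatus"])
```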
The field to sort results by. The default is Name
.
If the value of this field is FinalObjectiveMetricValue
, any training\n jobs that did not return an objective metric are not listed.
The field to sort results by. The default is Name
.
If the value of this field is FinalObjectiveMetricValue
, any training\n jobs that did not return an objective metric are not listed.
Provides information about the location that is configured for storing model\n artifacts.
\nModel artifacts are the output that results from training a model, and typically\n consist of trained parameters, a model definition that describes how to compute\n inferences, and other metadata.
" + "smithy.api#documentation": "Provides information about the location that is configured for storing model\n artifacts.
\nModel artifacts are the output that results from training a model, and typically\n consist of trained parameters, a model definition that describes how to compute\n inferences, and other metadata.
" } }, "com.amazonaws.sagemaker#ModelBiasAppSpecification": { @@ -35698,7 +35420,7 @@ "InfrastructureType": { "target": "com.amazonaws.sagemaker#ModelInfrastructureType", "traits": { - "smithy.api#documentation": "The inference option to which to deploy your model. Possible values are the following:
\n\n RealTime
: Deploy to real-time inference.
The inference option to which to deploy your model. Possible values are the following:
\n\n RealTime
: Deploy to real-time inference.
The status of the model package. This can be one of the following values.
\n\n PENDING
- The model package is pending being created.
\n IN_PROGRESS
- The model package is in the process of being\n created.
\n COMPLETED
- The model package was successfully created.
\n FAILED
- The model package failed.
\n DELETING
- The model package is in the process of being deleted.
The status of the model package. This can be one of the following values.
\n\n PENDING
- The model package is pending being created.
\n IN_PROGRESS
- The model package is in the process of being\n created.
\n COMPLETED
- The model package was successfully created.
\n FAILED
- The model package failed.
\n DELETING
- The model package is in the process of being deleted.
The approval status of the model. This can be one of the following values.
\n\n APPROVED
- The model is approved
\n REJECTED
- The model is rejected.
\n PENDING_MANUAL_APPROVAL
- The model is waiting for manual\n approval.
The approval status of the model. This can be one of the following values.
\n\n APPROVED
- The model is approved
\n REJECTED
- The model is rejected.
\n PENDING_MANUAL_APPROVAL
- The model is waiting for manual\n approval.
The Amazon EC2 Container Registry (Amazon ECR) path where inference code is stored.
\nIf you are using your own custom algorithm instead of an algorithm provided by SageMaker,\n the inference code must meet SageMaker requirements. SageMaker supports both\n registry/repository[:tag]
and registry/repository[@digest]
\n image path formats. For more information, see Using Your Own Algorithms with Amazon\n SageMaker.
The Amazon EC2 Container Registry (Amazon ECR) path where inference code is stored.
\nIf you are using your own custom algorithm instead of an algorithm provided by SageMaker,\n the inference code must meet SageMaker requirements. SageMaker supports both\n registry/repository[:tag]
and registry/repository[@digest]
\n image path formats. For more information, see Using Your Own Algorithms with Amazon\n SageMaker.
The Amazon S3 path where the model artifacts, which result from model training, are stored.\n This path must point to a single gzip
compressed tar archive\n (.tar.gz
suffix).
The model artifacts must be in an S3 bucket that is in the same region as the\n model package.
\nThe Amazon S3 path where the model artifacts, which result from model training, are stored.\n This path must point to a single gzip
compressed tar archive\n (.tar.gz
suffix).
The model artifacts must be in an S3 bucket that is in the same region as the\n model package.
\nThe status of the model group. This can be one of the following values.
\n\n PENDING
- The model group is pending being created.
\n IN_PROGRESS
- The model group is in the process of being\n created.
\n COMPLETED
- The model group was successfully created.
\n FAILED
- The model group failed.
\n DELETING
- The model group is in the process of being deleted.
\n DELETE_FAILED
- SageMaker failed to delete the model group.
The status of the model group. This can be one of the following values.
\n\n PENDING
- The model group is pending being created.
\n IN_PROGRESS
- The model group is in the process of being\n created.
\n COMPLETED
- The model group was successfully created.
\n FAILED
- The model group failed.
\n DELETING
- The model group is in the process of being deleted.
\n DELETE_FAILED
- SageMaker failed to delete the model group.
The approval status of the model. This can be one of the following values.
\n\n APPROVED
- The model is approved
\n REJECTED
- The model is rejected.
\n PENDING_MANUAL_APPROVAL
- The model is waiting for manual\n approval.
The approval status of the model. This can be one of the following values.
\n\n APPROVED
- The model is approved
\n REJECTED
- The model is rejected.
\n PENDING_MANUAL_APPROVAL
- The model is waiting for manual\n approval.
Contains data, such as the inputs and targeted instance types that are used in the\n process of validating the model package.
\nThe data provided in the validation profile is made available to your buyers on Amazon Web Services\n Marketplace.
" + "smithy.api#documentation": "Contains data, such as the inputs and targeted instance types that are used in the\n process of validating the model package.
\nThe data provided in the validation profile is made available to your buyers on Amazon Web Services\n Marketplace.
" } }, "com.amazonaws.sagemaker#ModelPackageValidationProfiles": { @@ -36891,7 +36613,7 @@ "ModelName": { "target": "com.amazonaws.sagemaker#ModelName", "traits": { - "smithy.api#documentation": "The name of the Amazon SageMaker Model entity.
", + "smithy.api#documentation": "The name of the model.
", "smithy.api#required": {} } }, @@ -36932,7 +36654,7 @@ "ModelName": { "target": "com.amazonaws.sagemaker#ModelName", "traits": { - "smithy.api#documentation": "The name of the Amazon SageMaker Model entity.
", + "smithy.api#documentation": "The name of the model.
", "smithy.api#required": {} } }, @@ -36953,7 +36675,7 @@ "Status": { "target": "com.amazonaws.sagemaker#ModelVariantStatus", "traits": { - "smithy.api#documentation": "The status of deployment for the model variant on the hosted inference endpoint.
\n\n Creating
- Amazon SageMaker is preparing the model variant on the hosted inference endpoint.\n
\n InService
- The model variant is running on the hosted inference endpoint.\n
\n Updating
- Amazon SageMaker is updating the model variant on the hosted inference endpoint.\n
\n Deleting
- Amazon SageMaker is deleting the model variant on the hosted inference endpoint.\n
\n Deleted
- The model variant has been deleted on the hosted inference endpoint. This\n can only happen after stopping the experiment.\n
The status of the deployment.
", "smithy.api#required": {} } } @@ -37875,7 +37597,7 @@ "MonitoringScheduleStatus": { "target": "com.amazonaws.sagemaker#ScheduleStatus", "traits": { - "smithy.api#documentation": "The status of the monitoring schedule. This can be one of the following values.
\n\n PENDING
- The schedule is pending being created.
\n FAILED
- The schedule failed.
\n SCHEDULED
- The schedule was successfully created.
\n STOPPED
- The schedule was stopped.
The status of the monitoring schedule. This can be one of the following values.
\n\n PENDING
- The schedule is pending being created.
\n FAILED
- The schedule failed.
\n SCHEDULED
- The schedule was successfully created.
\n STOPPED
- The schedule was stopped.
Contains the notebook instance lifecycle configuration script.
\nEach lifecycle configuration script has a limit of 16384 characters.
\nThe value of the $PATH
environment variable that is available to both\n scripts is /sbin:bin:/usr/sbin:/usr/bin
.
View CloudWatch Logs for notebook instance lifecycle configurations in log group\n /aws/sagemaker/NotebookInstances
in log stream\n [notebook-instance-name]/[LifecycleConfigHook]
.
Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs\n for longer than 5 minutes, it fails and the notebook instance is not created or\n started.
\nFor information about notebook instance lifestyle configurations, see Step\n 2.1: (Optional) Customize a Notebook Instance.
" + "smithy.api#documentation": "Contains the notebook instance lifecycle configuration script.
\nEach lifecycle configuration script has a limit of 16384 characters.
\nThe value of the $PATH
environment variable that is available to both\n scripts is /sbin:bin:/usr/sbin:/usr/bin
.
View CloudWatch Logs for notebook instance lifecycle configurations in log group\n /aws/sagemaker/NotebookInstances
in log stream\n [notebook-instance-name]/[LifecycleConfigHook]
.
Lifecycle configuration scripts cannot run for longer than 5 minutes. If a script runs\n for longer than 5 minutes, it fails and the notebook instance is not created or\n started.
\nFor information about notebook instance lifestyle configurations, see Step\n 2.1: (Optional) Customize a Notebook Instance.
" } }, "com.amazonaws.sagemaker#NotebookInstanceName": { @@ -38672,7 +38394,7 @@ "NotebookInstanceLifecycleConfigName": { "target": "com.amazonaws.sagemaker#NotebookInstanceLifecycleConfigName", "traits": { - "smithy.api#documentation": "The name of a notebook instance lifecycle configuration associated with this notebook\n instance.
\nFor information about notebook instance lifestyle configurations, see Step\n 2.1: (Optional) Customize a Notebook Instance.
" + "smithy.api#documentation": "The name of a notebook instance lifecycle configuration associated with this notebook\n instance.
\nFor information about notebook instance lifestyle configurations, see Step\n 2.1: (Optional) Customize a Notebook Instance.
" } }, "DefaultCodeRepository": { @@ -38843,7 +38565,7 @@ "TableFormat": { "target": "com.amazonaws.sagemaker#TableFormat", "traits": { - "smithy.api#documentation": "Format for the offline store table. Supported formats are Glue (Default) and Apache Iceberg.
" + "smithy.api#documentation": "Format for the offline store feature group. Iceberg
is the optimal format for feature groups shared between offline and online stores.
Contains information about a target platform that you want your model to run on, such\n as OS, architecture, and accelerators. It is an alternative of\n TargetDevice
.
The following examples show how to configure the TargetPlatform
and\n CompilerOptions
JSON strings for popular target platforms:
Raspberry Pi 3 Model B+
\n\n \"TargetPlatform\": {\"Os\": \"LINUX\", \"Arch\": \"ARM_EABIHF\"},
\n
\n \"CompilerOptions\": {'mattr': ['+neon']}
\n
Jetson TX2
\n\n \"TargetPlatform\": {\"Os\": \"LINUX\", \"Arch\": \"ARM64\", \"Accelerator\":\n \"NVIDIA\"},
\n
\n \"CompilerOptions\": {'gpu-code': 'sm_62', 'trt-ver': '6.0.1',\n 'cuda-ver': '10.0'}
\n
EC2 m5.2xlarge instance OS
\n\n \"TargetPlatform\": {\"Os\": \"LINUX\", \"Arch\": \"X86_64\", \"Accelerator\":\n \"NVIDIA\"},
\n
\n \"CompilerOptions\": {'mcpu': 'skylake-avx512'}
\n
RK3399
\n\n \"TargetPlatform\": {\"Os\": \"LINUX\", \"Arch\": \"ARM64\", \"Accelerator\":\n \"MALI\"}
\n
ARMv7 phone (CPU)
\n\n \"TargetPlatform\": {\"Os\": \"ANDROID\", \"Arch\": \"ARM_EABI\"},
\n
\n \"CompilerOptions\": {'ANDROID_PLATFORM': 25, 'mattr':\n ['+neon']}
\n
ARMv8 phone (CPU)
\n\n \"TargetPlatform\": {\"Os\": \"ANDROID\", \"Arch\": \"ARM64\"},
\n
\n \"CompilerOptions\": {'ANDROID_PLATFORM': 29}
\n
Contains information about a target platform that you want your model to run on, such\n as OS, architecture, and accelerators. It is an alternative of\n TargetDevice
.
The following examples show how to configure the TargetPlatform
and\n CompilerOptions
JSON strings for popular target platforms:
Raspberry Pi 3 Model B+
\n\n \"TargetPlatform\": {\"Os\": \"LINUX\", \"Arch\": \"ARM_EABIHF\"},
\n
\n \"CompilerOptions\": {'mattr': ['+neon']}
\n
Jetson TX2
\n\n \"TargetPlatform\": {\"Os\": \"LINUX\", \"Arch\": \"ARM64\", \"Accelerator\":\n \"NVIDIA\"},
\n
\n \"CompilerOptions\": {'gpu-code': 'sm_62', 'trt-ver': '6.0.1',\n 'cuda-ver': '10.0'}
\n
EC2 m5.2xlarge instance OS
\n\n \"TargetPlatform\": {\"Os\": \"LINUX\", \"Arch\": \"X86_64\", \"Accelerator\":\n \"NVIDIA\"},
\n
\n \"CompilerOptions\": {'mcpu': 'skylake-avx512'}
\n
RK3399
\n\n \"TargetPlatform\": {\"Os\": \"LINUX\", \"Arch\": \"ARM64\", \"Accelerator\":\n \"MALI\"}
\n
ARMv7 phone (CPU)
\n\n \"TargetPlatform\": {\"Os\": \"ANDROID\", \"Arch\": \"ARM_EABI\"},
\n
\n \"CompilerOptions\": {'ANDROID_PLATFORM': 25, 'mattr':\n ['+neon']}
\n
ARMv8 phone (CPU)
\n\n \"TargetPlatform\": {\"Os\": \"ANDROID\", \"Arch\": \"ARM64\"},
\n
\n \"CompilerOptions\": {'ANDROID_PLATFORM': 29}
\n
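[Editor note] One of the combinations above expressed as the OutputConfig argument of create_compilation_job; the bucket is a placeholder and CompilerOptions is passed as a JSON string.

```python
import json

# Example for an ARM64 + NVIDIA target such as Jetson TX2, mirroring the values above.
output_config = {
    "S3OutputLocation": "s3://my-bucket/compiled/",
    "TargetPlatform": {"Os": "LINUX", "Arch": "ARM64", "Accelerator": "NVIDIA"},
    "CompilerOptions": json.dumps({"gpu-code": "sm_62", "trt-ver": "6.0.1", "cuda-ver": "10.0"}),
}
```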
Specifies additional parameters for compiler options in JSON format. The compiler\n options are TargetPlatform
specific. It is required for NVIDIA accelerators\n and highly recommended for CPU compilations. For any other cases, it is optional to\n specify CompilerOptions.
\n
\n DTYPE
: Specifies the data type for the input. When compiling for\n ml_*
(except for ml_inf
) instances using PyTorch\n framework, provide the data type (dtype) of the model's input.\n \"float32\"
is used if \"DTYPE\"
is not specified.\n Options for data type are:
float32: Use either \"float\"
or \"float32\"
.
int64: Use either \"int64\"
or \"long\"
.
For example, {\"dtype\" : \"float32\"}
.
\n CPU
: Compilation for CPU supports the following compiler\n options.
\n mcpu
: CPU micro-architecture. For example, {'mcpu':\n 'skylake-avx512'}
\n
\n mattr
: CPU flags. For example, {'mattr': ['+neon',\n '+vfpv4']}
\n
\n ARM
: Details of ARM CPU compilations.
\n NEON
: NEON is an implementation of the Advanced SIMD\n extension used in ARMv7 processors.
For example, add {'mattr': ['+neon']}
to the compiler\n options if compiling for ARM 32-bit platform with the NEON\n support.
\n NVIDIA
: Compilation for NVIDIA GPU supports the following\n compiler options.
\n gpu_code
: Specifies the targeted architecture.
\n trt-ver
: Specifies the TensorRT versions in x.y.z.\n format.
\n cuda-ver
: Specifies the CUDA version in x.y\n format.
For example, {'gpu-code': 'sm_72', 'trt-ver': '6.0.1', 'cuda-ver':\n '10.1'}
\n
\n ANDROID
: Compilation for the Android OS supports the following\n compiler options:
\n ANDROID_PLATFORM
: Specifies the Android API levels.\n Available levels range from 21 to 29. For example,\n {'ANDROID_PLATFORM': 28}
.
\n mattr
: Add {'mattr': ['+neon']}
to compiler\n options if compiling for ARM 32-bit platform with NEON support.
\n INFERENTIA
: Compilation for target ml_inf1 uses compiler options\n passed in as a JSON string. For example,\n \"CompilerOptions\": \"\\\"--verbose 1 --num-neuroncores 2 -O2\\\"\"
.\n
For information about supported compiler options, see\n \n Neuron Compiler CLI.\n
\n\n CoreML
: Compilation for the CoreML OutputConfig$TargetDevice\n supports the following compiler options:
\n class_labels
: Specifies the classification labels file\n name inside input tar.gz file. For example,\n {\"class_labels\": \"imagenet_labels_1000.txt\"}
.\n Labels inside the txt file should be separated by newlines.
\n EIA
: Compilation for the Elastic Inference Accelerator supports the following \n compiler options:
\n precision_mode
: Specifies the precision of compiled artifacts. Supported values\n are \"FP16\"
and \"FP32\"
. Default is\n \"FP32\"
.
\n signature_def_key
: Specifies the signature to use for models in SavedModel\n format. Defaults is TensorFlow's default signature def key.
\n output_names
: Specifies a list of output tensor names for \n models in FrozenGraph format. Set at most one API field, either: signature_def_key
or output_names
.
For example: \n {\"precision_mode\": \"FP32\", \"output_names\": [\"output:0\"]}
\n
Specifies additional parameters for compiler options in JSON format. The compiler\n options are TargetPlatform
specific. It is required for NVIDIA accelerators\n and highly recommended for CPU compilations. For any other cases, it is optional to\n specify CompilerOptions.
\n
\n DTYPE
: Specifies the data type for the input. When compiling for\n ml_*
(except for ml_inf
) instances using PyTorch\n framework, provide the data type (dtype) of the model's input.\n \"float32\"
is used if \"DTYPE\"
is not specified.\n Options for data type are:
float32: Use either \"float\"
or \"float32\"
.
int64: Use either \"int64\"
or \"long\"
.
For example, {\"dtype\" : \"float32\"}
.
\n CPU
: Compilation for CPU supports the following compiler\n options.
\n mcpu
: CPU micro-architecture. For example, {'mcpu':\n 'skylake-avx512'}
\n
\n mattr
: CPU flags. For example, {'mattr': ['+neon',\n '+vfpv4']}
\n
\n ARM
: Details of ARM CPU compilations.
\n NEON
: NEON is an implementation of the Advanced SIMD\n extension used in ARMv7 processors.
For example, add {'mattr': ['+neon']}
to the compiler\n options if compiling for ARM 32-bit platform with the NEON\n support.
\n NVIDIA
: Compilation for NVIDIA GPU supports the following\n compiler options.
\n gpu_code
: Specifies the targeted architecture.
\n trt-ver
: Specifies the TensorRT versions in x.y.z.\n format.
\n cuda-ver
: Specifies the CUDA version in x.y\n format.
For example, {'gpu-code': 'sm_72', 'trt-ver': '6.0.1', 'cuda-ver':\n '10.1'}
\n
\n ANDROID
: Compilation for the Android OS supports the following\n compiler options:
\n ANDROID_PLATFORM
: Specifies the Android API levels.\n Available levels range from 21 to 29. For example,\n {'ANDROID_PLATFORM': 28}
.
\n mattr
: Add {'mattr': ['+neon']}
to compiler\n options if compiling for ARM 32-bit platform with NEON support.
\n INFERENTIA
: Compilation for target ml_inf1 uses compiler options\n passed in as a JSON string. For example,\n \"CompilerOptions\": \"\\\"--verbose 1 --num-neuroncores 2 -O2\\\"\"
.\n
For information about supported compiler options, see\n \n Neuron Compiler CLI.\n
\n\n CoreML
: Compilation for the CoreML OutputConfig$TargetDevice\n supports the following compiler options:
\n class_labels
: Specifies the classification labels file\n name inside input tar.gz file. For example,\n {\"class_labels\": \"imagenet_labels_1000.txt\"}
.\n Labels inside the txt file should be separated by newlines.
\n EIA
: Compilation for the Elastic Inference Accelerator supports the following \n compiler options:
\n precision_mode
: Specifies the precision of compiled artifacts. Supported values\n are \"FP16\"
and \"FP32\"
. Default is\n \"FP32\"
.
\n signature_def_key
: Specifies the signature to use for models in SavedModel\n format. The default is TensorFlow's default signature def key.
\n output_names
: Specifies a list of output tensor names for \n models in FrozenGraph format. Set at most one API field, either: signature_def_key
or output_names
.
For example: \n {\"precision_mode\": \"FP32\", \"output_names\": [\"output:0\"]}
\n
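The CompilerOptions value documented above is a single JSON object (or, for Inferentia, a quoted argument string) serialized into the request. Below is a minimal Kotlin sketch of plausible payloads for a few of the targets; the option names come from the documentation above, everything else is illustrative.

```kotlin
// Illustrative only: example CompilerOptions payloads for a few of the targets
// documented above. The JSON is hand-assembled so the sketch has no dependencies.
fun main() {
    val examples = mapOf(
        // EIA: precision of the compiled artifacts plus explicit output tensors.
        "EIA" to """{"precision_mode": "FP32", "output_names": ["output:0"]}""",
        // CoreML: classification labels file shipped inside the input tar.gz.
        "CoreML" to """{"class_labels": "imagenet_labels_1000.txt"}""",
        // NVIDIA: target architecture plus TensorRT and CUDA versions.
        "NVIDIA" to """{"gpu-code": "sm_72", "trt-ver": "6.0.1", "cuda-ver": "10.1"}""",
        // ANDROID: API level and NEON support for a 32-bit ARM platform.
        "ANDROID" to """{"ANDROID_PLATFORM": 28, "mattr": ["+neon"]}""",
        // INFERENTIA: the whole Neuron CLI argument string, passed as a quoted JSON string.
        "INFERENTIA" to "\"--verbose 1 --num-neuroncores 2 -O2\""
    )
    examples.forEach { (target, compilerOptions) ->
        println("$target -> CompilerOptions = $compilerOptions")
    }
}
```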
The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker uses to encrypt your output models with Amazon S3 server-side encryption\n after compilation job. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account.\n For more information, see\n KMS-Managed Encryption\n Keys in the Amazon Simple Storage Service Developer Guide.\n
\nThe KmsKeyId can be any of the following formats:
\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN:\n arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
Alias name: alias/ExampleAlias
\n
Alias name ARN:\n arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias
\n
The Amazon Web Services Key Management Service key (Amazon Web Services KMS) that Amazon SageMaker uses to encrypt your output models with Amazon S3 server-side encryption\n after compilation job. If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account.\n For more information, see\n KMS-Managed Encryption\n Keys in the Amazon Simple Storage Service Developer Guide.\n
\n\tThe KmsKeyId can be any of the following formats:
\nKey ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN:\n arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
Alias name: alias/ExampleAlias
\n
Alias name ARN:\n arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias
\n
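The four accepted KmsKeyId formats listed above can be told apart mechanically. A small, hedged Kotlin sketch follows; the regular expressions are an assumption made for illustration, not a validation rule from the service.

```kotlin
// Illustrative only: a rough classifier for the four KmsKeyId formats above.
enum class KmsKeyIdFormat { KEY_ID, KEY_ARN, ALIAS_NAME, ALIAS_ARN, UNKNOWN }

fun classifyKmsKeyId(id: String): KmsKeyIdFormat = when {
    Regex("^[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$").matches(id) -> KmsKeyIdFormat.KEY_ID
    id.startsWith("arn:aws:kms:") && ":key/" in id                   -> KmsKeyIdFormat.KEY_ARN
    id.startsWith("alias/")                                          -> KmsKeyIdFormat.ALIAS_NAME
    id.startsWith("arn:aws:kms:") && ":alias/" in id                 -> KmsKeyIdFormat.ALIAS_ARN
    else                                                             -> KmsKeyIdFormat.UNKNOWN
}

fun main() {
    listOf(
        "1234abcd-12ab-34cd-56ef-1234567890ab",
        "arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab",
        "alias/ExampleAlias",
        "arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias"
    ).forEach { println("$it -> ${classifyKmsKeyId(it)}") }
}
```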
The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that SageMaker\n uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The\n KmsKeyId
can be any of the following formats:
// KMS Key ID
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// Amazon Resource Name (ARN) of a KMS Key
\n\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// KMS Key Alias
\n\n \"alias/ExampleAlias\"
\n
// Amazon Resource Name (ARN) of a KMS Key Alias
\n\n \"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\"
\n
If you use a KMS key ID or an alias of your KMS key, the SageMaker execution role must\n include permissions to call kms:Encrypt
. If you don't provide a KMS key ID,\n SageMaker uses the default KMS key for Amazon S3 for your role's account. SageMaker uses server-side\n encryption with KMS-managed keys for OutputDataConfig
. If you use a bucket\n policy with an s3:PutObject
permission that only allows objects with\n server-side encryption, set the condition key of\n s3:x-amz-server-side-encryption
to \"aws:kms\"
. For more\n information, see KMS-Managed Encryption\n Keys in the Amazon Simple Storage Service Developer Guide.\n
The KMS key policy must grant permission to the IAM role that you specify in your\n CreateTrainingJob
, CreateTransformJob
, or\n CreateHyperParameterTuningJob
requests. For more information, see\n Using\n Key Policies in Amazon Web Services KMS in the Amazon Web Services\n Key Management Service Developer Guide.
The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that SageMaker\n uses to encrypt the model artifacts at rest using Amazon S3 server-side encryption. The\n KmsKeyId
can be any of the following formats:
// KMS Key ID
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// Amazon Resource Name (ARN) of a KMS Key
\n\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// KMS Key Alias
\n\n \"alias/ExampleAlias\"
\n
// Amazon Resource Name (ARN) of a KMS Key Alias
\n\n \"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\"
\n
If you use a KMS key ID or an alias of your KMS key, the SageMaker execution role must\n include permissions to call kms:Encrypt
. If you don't provide a KMS key ID,\n SageMaker uses the default KMS key for Amazon S3 for your role's account. SageMaker uses server-side\n encryption with KMS-managed keys for OutputDataConfig
. If you use a bucket\n policy with an s3:PutObject
permission that only allows objects with\n server-side encryption, set the condition key of\n s3:x-amz-server-side-encryption
to \"aws:kms\"
. For more\n information, see KMS-Managed Encryption\n Keys in the Amazon Simple Storage Service Developer Guide.\n
The KMS key policy must grant permission to the IAM role that you specify in your\n CreateTrainingJob
, CreateTransformJob
, or\n CreateHyperParameterTuningJob
requests. For more information, see\n Using\n Key Policies in Amazon Web Services KMS in the Amazon Web Services\n Key Management Service Developer Guide.
Specifies ranges of integer, continuous, and categorical hyperparameters that a\n hyperparameter tuning job searches. The hyperparameter tuning job launches training jobs\n with hyperparameter values within these ranges to find the combination of values that\n result in the training job with the best performance as measured by the objective metric\n of the hyperparameter tuning job.
\nThe maximum number of items specified for Array Members
refers to the\n maximum number of hyperparameters for each range and also the maximum for the\n hyperparameter tuning job itself. That is, the sum of the number of hyperparameters\n for all the ranges can't exceed the maximum number specified.
Specifies ranges of integer, continuous, and categorical hyperparameters that a\n hyperparameter tuning job searches. The hyperparameter tuning job launches training jobs\n with hyperparameter values within these ranges to find the combination of values that\n result in the training job with the best performance as measured by the objective metric\n of the hyperparameter tuning job.
\nThe maximum number of items specified for Array Members
refers to the\n maximum number of hyperparameters for each range and also the maximum for the\n hyperparameter tuning job itself. That is, the sum of the number of hyperparameters\n for all the ranges can't exceed the maximum number specified.
An array of PendingProductionVariantSummary objects, one for each model\n hosted behind this endpoint for the in-progress deployment.
" + "smithy.api#documentation": "List of PendingProductionVariantSummary
objects.
An array of PendingProductionVariantSummary objects, one for each model\n hosted behind this endpoint in shadow mode with production traffic replicated from the\n model specified on ProductionVariants
for the in-progress deployment.
\n Array of ProductionVariant
objects, one for each model that you want to host at this endpoint\n in shadow mode with production traffic replicated from the model specified on\n ProductionVariants
.If you use this field, you can only specify one variant for\n ProductionVariants
and one variant for ShadowProductionVariants
.\n
The size of the ML storage volume in gigabytes that you want to provision. You must\n specify sufficient ML storage for your scenario.
\nCertain Nitro-based instances include local storage with a fixed total size,\n dependent on the instance type. When using these instances for processing, Amazon SageMaker mounts\n the local instance storage instead of Amazon EBS gp2 storage. You can't request a\n VolumeSizeInGB
greater than the total size of the local instance\n storage.
For a list of instance types that support local instance storage, including the\n total size per instance type, see Instance Store Volumes.
\nThe size of the ML storage volume in gigabytes that you want to provision. You must\n specify sufficient ML storage for your scenario.
\nCertain Nitro-based instances include local storage with a fixed total size,\n dependent on the instance type. When using these instances for processing, Amazon SageMaker mounts\n the local instance storage instead of Amazon EBS gp2 storage. You can't request a\n VolumeSizeInGB
greater than the total size of the local instance\n storage.
For a list of instance types that support local instance storage, including the\n total size per instance type, see Instance Store Volumes.
\nThe Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on the\n storage volume attached to the ML compute instance(s) that run the processing job.\n
\nCertain Nitro-based instances include local storage, dependent on the instance\n type. Local storage volumes are encrypted using a hardware module on the instance.\n You can't request a VolumeKmsKeyId
when using an instance type with\n local storage.
For a list of instance types that support local instance storage, see Instance Store Volumes.
\nFor more information about local instance storage encryption, see SSD\n Instance Store Volumes.
\nThe Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on the\n storage volume attached to the ML compute instance(s) that run the processing job.\n
\nCertain Nitro-based instances include local storage, dependent on the instance\n type. Local storage volumes are encrypted using a hardware module on the instance.\n You can't request a VolumeKmsKeyId
when using an instance type with\n local storage.
For a list of instance types that support local instance storage, see Instance Store Volumes.
\nFor more information about local instance storage encryption, see SSD\n Instance Store Volumes.
\nThe Amazon Web Services Key Management Service (Amazon Web Services KMS) key that SageMaker\n uses to encrypt the core dump data at rest using Amazon S3 server-side encryption. The\n KmsKeyId
can be any of the following formats:
// KMS Key ID
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// Amazon Resource Name (ARN) of a KMS Key
\n\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// KMS Key Alias
\n\n \"alias/ExampleAlias\"
\n
// Amazon Resource Name (ARN) of a KMS Key Alias
\n\n \"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\"
\n
If you use a KMS key ID or an alias of your KMS key, the SageMaker execution role must\n include permissions to call kms:Encrypt
. If you don't provide a KMS key ID,\n SageMaker uses the default KMS key for Amazon S3 for your role's account. SageMaker uses server-side\n encryption with KMS-managed keys for OutputDataConfig
. If you use a bucket\n policy with an s3:PutObject
permission that only allows objects with\n server-side encryption, set the condition key of\n s3:x-amz-server-side-encryption
to \"aws:kms\"
. For more\n information, see KMS-Managed Encryption\n Keys in the Amazon Simple Storage Service Developer Guide.\n
The KMS key policy must grant permission to the IAM role that you specify in your\n CreateEndpoint
and UpdateEndpoint
requests. For more\n information, see Using Key Policies in Amazon Web Services KMS in the Amazon Web Services Key Management\n Service Developer Guide.
The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that SageMaker\n uses to encrypt the core dump data at rest using Amazon S3 server-side encryption. The\n KmsKeyId
can be any of the following formats:
// KMS Key ID
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// Amazon Resource Name (ARN) of a KMS Key
\n\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// KMS Key Alias
\n\n \"alias/ExampleAlias\"
\n
// Amazon Resource Name (ARN) of a KMS Key Alias
\n\n \"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\"
\n
If you use a KMS key ID or an alias of your KMS key, the SageMaker execution role must\n include permissions to call kms:Encrypt
. If you don't provide a KMS key ID,\n SageMaker uses the default KMS key for Amazon S3 for your role's account. SageMaker uses server-side\n encryption with KMS-managed keys for OutputDataConfig
. If you use a bucket\n policy with an s3:PutObject
permission that only allows objects with\n server-side encryption, set the condition key of\n s3:x-amz-server-side-encryption
to \"aws:kms\"
. For more\n information, see KMS-Managed Encryption\n Keys in the Amazon Simple Storage Service Developer Guide.\n
The KMS key policy must grant permission to the IAM role that you specify in your\n CreateEndpoint
and UpdateEndpoint
requests. For more\n information, see Using Key Policies in Amazon Web Services KMS in the Amazon Web Services Key Management\n Service Developer Guide.
The endpoint variant status which describes the current deployment stage status or\n operational status.
\n\n Creating
: Creating inference resources for the production\n variant.
\n Deleting
: Terminating inference resources for the production\n variant.
\n Updating
: Updating capacity for the production variant.
\n ActivatingTraffic
: Turning on traffic for the production\n variant.
\n Baking
: Waiting period to monitor the CloudWatch alarms in the\n automatic rollback configuration.
The endpoint variant status which describes the current deployment stage status or\n operational status.
\n\n Creating
: Creating inference resources for the production\n variant.
\n Deleting
: Terminating inference resources for the production\n variant.
\n Updating
: Updating capacity for the production variant.
\n ActivatingTraffic
: Turning on traffic for the production\n variant.
\n Baking
: Waiting period to monitor the CloudWatch alarms in the\n automatic rollback configuration.
Defines the amount of money paid to an Amazon Mechanical Turk worker for each task performed.
\nUse one of the following prices for bounding box tasks. Prices are in US dollars and\n should be based on the complexity of the task; the longer it takes in your initial\n testing, the more you should offer.
\n0.036
\n0.048
\n0.060
\n0.072
\n0.120
\n0.240
\n0.360
\n0.480
\n0.600
\n0.720
\n0.840
\n0.960
\n1.080
\n1.200
\nUse one of the following prices for image classification, text classification, and\n custom tasks. Prices are in US dollars.
\n0.012
\n0.024
\n0.036
\n0.048
\n0.060
\n0.072
\n0.120
\n0.240
\n0.360
\n0.480
\n0.600
\n0.720
\n0.840
\n0.960
\n1.080
\n1.200
\nUse one of the following prices for semantic segmentation tasks. Prices are in US\n dollars.
\n0.840
\n0.960
\n1.080
\n1.200
\nUse one of the following prices for Textract AnalyzeDocument Important Form Key Amazon\n Augmented AI review tasks. Prices are in US dollars.
\n2.400
\n2.280
\n2.160
\n2.040
\n1.920
\n1.800
\n1.680
\n1.560
\n1.440
\n1.320
\n1.200
\n1.080
\n0.960
\n0.840
\n0.720
\n0.600
\n0.480
\n0.360
\n0.240
\n0.120
\n0.072
\n0.060
\n0.048
\n0.036
\n0.024
\n0.012
\nUse one of the following prices for Rekognition DetectModerationLabels Amazon\n Augmented AI review tasks. Prices are in US dollars.
\n1.200
\n1.080
\n0.960
\n0.840
\n0.720
\n0.600
\n0.480
\n0.360
\n0.240
\n0.120
\n0.072
\n0.060
\n0.048
\n0.036
\n0.024
\n0.012
\nUse one of the following prices for Amazon Augmented AI custom human review tasks.\n Prices are in US dollars.
\n1.200
\n1.080
\n0.960
\n0.840
\n0.720
\n0.600
\n0.480
\n0.360
\n0.240
\n0.120
\n0.072
\n0.060
\n0.048
\n0.036
\n0.024
\n0.012
\nDefines the amount of money paid to an Amazon Mechanical Turk worker for each task performed.
\nUse one of the following prices for bounding box tasks. Prices are in US dollars and\n should be based on the complexity of the task; the longer it takes in your initial\n testing, the more you should offer.
\n0.036
\n0.048
\n0.060
\n0.072
\n0.120
\n0.240
\n0.360
\n0.480
\n0.600
\n0.720
\n0.840
\n0.960
\n1.080
\n1.200
\nUse one of the following prices for image classification, text classification, and\n custom tasks. Prices are in US dollars.
\n0.012
\n0.024
\n0.036
\n0.048
\n0.060
\n0.072
\n0.120
\n0.240
\n0.360
\n0.480
\n0.600
\n0.720
\n0.840
\n0.960
\n1.080
\n1.200
\nUse one of the following prices for semantic segmentation tasks. Prices are in US\n dollars.
\n0.840
\n0.960
\n1.080
\n1.200
\nUse one of the following prices for Textract AnalyzeDocument Important Form Key Amazon\n Augmented AI review tasks. Prices are in US dollars.
\n2.400
\n2.280
\n2.160
\n2.040
\n1.920
\n1.800
\n1.680
\n1.560
\n1.440
\n1.320
\n1.200
\n1.080
\n0.960
\n0.840
\n0.720
\n0.600
\n0.480
\n0.360
\n0.240
\n0.120
\n0.072
\n0.060
\n0.048
\n0.036
\n0.024
\n0.012
\nUse one of the following prices for Rekognition DetectModerationLabels Amazon\n Augmented AI review tasks. Prices are in US dollars.
\n1.200
\n1.080
\n0.960
\n0.840
\n0.720
\n0.600
\n0.480
\n0.360
\n0.240
\n0.120
\n0.072
\n0.060
\n0.048
\n0.036
\n0.024
\n0.012
\nUse one of the following prices for Amazon Augmented AI custom human review tasks.\n Prices are in US dollars.
\n1.200
\n1.080
\n0.960
\n0.840
\n0.720
\n0.600
\n0.480
\n0.360
\n0.240
\n0.120
\n0.072
\n0.060
\n0.048
\n0.036
\n0.024
\n0.012
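All of the list prices above are whole multiples of USD 0.012. A hedged Kotlin sketch of converting such a price into dollars, cents, and tenth fractions of a cent, which is how the SageMaker USD shape represents an amount elsewhere in this model; that shape's member names are not shown in the excerpt above and are treated as an assumption here.

```kotlin
// Illustrative only: $0.036 becomes dollars = 0, cents = 3, tenths = 6, and
// $1.200 becomes dollars = 1, cents = 20, tenths = 0 (assumed USD layout).
data class UsdPrice(val dollars: Int, val cents: Int, val tenthFractionsOfACent: Int)

fun usdFromTenthsOfACent(totalTenths: Int): UsdPrice {
    val dollars = totalTenths / 1000            // 1000 tenths of a cent per dollar
    val cents = (totalTenths % 1000) / 10       // 10 tenths of a cent per cent
    val tenths = totalTenths % 10
    return UsdPrice(dollars, cents, tenths)
}

fun main() {
    // 0.036 USD = 36 tenths of a cent; 0.048 USD = 48; 1.200 USD = 1200.
    listOf(36, 48, 1200).forEach { println("$it tenths of a cent -> ${usdFromTenthsOfACent(it)}") }
}
```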
\nA URL pointing to an RStudio Connect server.
" - } - }, - "RStudioPackageManagerUrl": { - "target": "com.amazonaws.sagemaker#String", - "traits": { - "smithy.api#documentation": "A URL pointing to an RStudio Package Manager server.
" - } } }, "traits": { @@ -43549,34 +43232,26 @@ } } }, - "com.amazonaws.sagemaker#RandomSeed": { - "type": "integer", - "traits": { - "smithy.api#range": { - "min": 0 - } - } - }, "com.amazonaws.sagemaker#RealTimeInferenceConfig": { "type": "structure", "members": { "InstanceType": { "target": "com.amazonaws.sagemaker#InstanceType", "traits": { - "smithy.api#documentation": "The instance type the model is deployed to.
", + "smithy.api#documentation": "The number of instances of the type specified by InstanceType
.
The number of instances of the type specified by InstanceType
.
The instance type the model is deployed to.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "The infrastructure configuration for deploying the model to a real-time inference endpoint.
" + "smithy.api#documentation": "The infrastructure configuration for deploying the model to real-time inference.
" } }, "com.amazonaws.sagemaker#RealtimeInferenceInstanceTypes": { @@ -43734,7 +43409,7 @@ "VolumeKmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service (Amazon Web Services KMS) key \n that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint. \n This key will be passed to SageMaker Hosting for endpoint creation.
\nThe SageMaker execution role must have kms:CreateGrant
permission in order to encrypt data on the storage \n volume of the endpoints created for inference recommendation. The inference recommendation job will fail \n asynchronously during endpoint configuration creation if the role passed does not have \n kms:CreateGrant
permission.
The KmsKeyId
can be any of the following formats:
// KMS Key ID
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// Amazon Resource Name (ARN) of a KMS Key
\n\n \"arn:aws:kms:
\n
// KMS Key Alias
\n\n \"alias/ExampleAlias\"
\n
// Amazon Resource Name (ARN) of a KMS Key Alias
\n\n \"arn:aws:kms:
\n
For more information about key identifiers, see \n Key identifiers (KeyID) in the \n Amazon Web Services Key Management Service (Amazon Web Services KMS) documentation.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service (Amazon Web Services KMS) key \n that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint. \n This key will be passed to SageMaker Hosting for endpoint creation.
\n \nThe SageMaker execution role must have kms:CreateGrant
permission in order to encrypt data on the storage \n volume of the endpoints created for inference recommendation. The inference recommendation job will fail \n asynchronously during endpoint configuration creation if the role passed does not have \n kms:CreateGrant
permission.
The KmsKeyId
can be any of the following formats:
// KMS Key ID
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// Amazon Resource Name (ARN) of a KMS Key
\n\n \"arn:aws:kms:
\n
// KMS Key Alias
\n\n \"alias/ExampleAlias\"
\n
// Amazon Resource Name (ARN) of a KMS Key Alias
\n\n \"arn:aws:kms:
\n
For more information about key identifiers, see \n Key identifiers (KeyID) in the \n Amazon Web Services Key Management Service (Amazon Web Services KMS) documentation.
" } }, "ContainerConfig": { @@ -43748,12 +43423,6 @@ "traits": { "smithy.api#documentation": "Existing customer endpoints on which to run an Inference Recommender job.
" } - }, - "VpcConfig": { - "target": "com.amazonaws.sagemaker#RecommendationJobVpcConfig", - "traits": { - "smithy.api#documentation": "Inference Recommender provisions SageMaker endpoints with access to VPC in the inference recommendation job.
" - } } }, "traits": { @@ -43776,7 +43445,7 @@ "KmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service (Amazon Web Services KMS) key \n that Amazon SageMaker uses to encrypt your output artifacts with Amazon S3 server-side encryption. \n The SageMaker execution role must have kms:GenerateDataKey
permission.
The KmsKeyId
can be any of the following formats:
// KMS Key ID
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// Amazon Resource Name (ARN) of a KMS Key
\n\n \"arn:aws:kms:
\n
// KMS Key Alias
\n\n \"alias/ExampleAlias\"
\n
// Amazon Resource Name (ARN) of a KMS Key Alias
\n\n \"arn:aws:kms:
\n
For more information about key identifiers, see \n Key identifiers (KeyID) in the \n Amazon Web Services Key Management Service (Amazon Web Services KMS) documentation.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of a Amazon Web Services Key Management Service (Amazon Web Services KMS) key \n that Amazon SageMaker uses to encrypt your output artifacts with Amazon S3 server-side encryption. \n The SageMaker execution role must have kms:GenerateDataKey
permission.
The KmsKeyId
can be any of the following formats:
// KMS Key ID
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// Amazon Resource Name (ARN) of a KMS Key
\n\n \"arn:aws:kms:
\n
// KMS Key Alias
\n\n \"alias/ExampleAlias\"
\n
// Amazon Resource Name (ARN) of a KMS Key Alias
\n\n \"arn:aws:kms:
\n
For more information about key identifiers, see \n Key identifiers (KeyID) in the \n Amazon Web Services Key Management Service (Amazon Web Services KMS) documentation.
" } }, "CompiledOutputConfig": { @@ -43921,72 +43590,6 @@ } } }, - "com.amazonaws.sagemaker#RecommendationJobVpcConfig": { - "type": "structure", - "members": { - "SecurityGroupIds": { - "target": "com.amazonaws.sagemaker#RecommendationJobVpcSecurityGroupIds", - "traits": { - "smithy.api#documentation": "The VPC security group IDs. IDs have the form of sg-xxxxxxxx
.\n Specify the security groups for the VPC that is specified in the Subnets
field.
The ID of the subnets in the VPC to which you want to connect your model.
", - "smithy.api#required": {} - } - } - }, - "traits": { - "smithy.api#documentation": "Inference Recommender provisions SageMaker endpoints with access to VPC in the inference recommendation job.
" - } - }, - "com.amazonaws.sagemaker#RecommendationJobVpcSecurityGroupId": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 0, - "max": 32 - }, - "smithy.api#pattern": "^[-0-9a-zA-Z]+$" - } - }, - "com.amazonaws.sagemaker#RecommendationJobVpcSecurityGroupIds": { - "type": "list", - "member": { - "target": "com.amazonaws.sagemaker#RecommendationJobVpcSecurityGroupId" - }, - "traits": { - "smithy.api#length": { - "min": 1, - "max": 5 - } - } - }, - "com.amazonaws.sagemaker#RecommendationJobVpcSubnetId": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 0, - "max": 32 - }, - "smithy.api#pattern": "^[-0-9a-zA-Z]+$" - } - }, - "com.amazonaws.sagemaker#RecommendationJobVpcSubnets": { - "type": "list", - "member": { - "target": "com.amazonaws.sagemaker#RecommendationJobVpcSubnetId" - }, - "traits": { - "smithy.api#length": { - "min": 1, - "max": 16 - } - } - }, "com.amazonaws.sagemaker#RecommendationMetrics": { "type": "structure", "members": { @@ -44274,16 +43877,6 @@ "smithy.api#documentation": "Metadata for a register model job step.
" } }, - "com.amazonaws.sagemaker#ReleaseNotes": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 255 - }, - "smithy.api#pattern": ".*" - } - }, "com.amazonaws.sagemaker#RenderUiTemplate": { "type": "operation", "input": { @@ -44327,7 +43920,7 @@ "HumanTaskUiArn": { "target": "com.amazonaws.sagemaker#HumanTaskUiArn", "traits": { - "smithy.api#documentation": "The HumanTaskUiArn
of the worker UI that you want to render. Do not\n provide a HumanTaskUiArn
if you use the UiTemplate
\n parameter.
See a list of available Human Ui Amazon Resource Names (ARNs) in UiConfig.
" + "smithy.api#documentation": "The HumanTaskUiArn
of the worker UI that you want to render. Do not\n provide a HumanTaskUiArn
if you use the UiTemplate
\n parameter.
See a list of available Human Ui Amazon Resource Names (ARNs) in UiConfig.
" } } } @@ -44482,7 +44075,7 @@ "InstanceType": { "target": "com.amazonaws.sagemaker#TrainingInstanceType", "traits": { - "smithy.api#documentation": "The ML compute instance type.
\nSageMaker Training on Amazon Elastic Compute Cloud (EC2) P4de instances is in preview release starting\n December 9th, 2022.
\n\n Amazon EC2 P4de instances\n (currently in preview) are powered by 8 NVIDIA A100 GPUs with 80GB high-performance\n HBM2e GPU memory, which accelerate the speed of training ML models that need to be\n trained on large datasets of high-resolution data. In this preview release, Amazon SageMaker\n supports ML training jobs on P4de instances (ml.p4de.24xlarge
) to\n reduce model training time. The ml.p4de.24xlarge
instances are\n available in the following Amazon Web Services Regions.
US East (N. Virginia) (us-east-1)
\nUS West (Oregon) (us-west-2)
\nTo request quota limit increase and start using P4de instances, contact the SageMaker\n Training service team through your account team.
\nThe ML compute instance type.
" } }, "InstanceCount": { @@ -44496,14 +44089,14 @@ "target": "com.amazonaws.sagemaker#VolumeSizeInGB", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "The size of the ML storage volume that you want to provision.
\nML storage volumes store model artifacts and incremental states. Training\n algorithms might also use the ML storage volume for scratch space. If you want to store\n the training data in the ML storage volume, choose File
as the\n TrainingInputMode
in the algorithm specification.
When using an ML instance with NVMe SSD\n volumes, SageMaker doesn't provision Amazon EBS General Purpose SSD (gp2) storage.\n Available storage is fixed to the NVMe-type instance's storage capacity. SageMaker configures\n storage paths for training datasets, checkpoints, model artifacts, and outputs to use\n the entire capacity of the instance storage. For example, ML instance families with the\n NVMe-type instance storage include ml.p4d
, ml.g4dn
, and\n ml.g5
.
When using an ML instance with the EBS-only storage option and without instance\n storage, you must define the size of EBS volume through VolumeSizeInGB
in\n the ResourceConfig
API. For example, ML instance families that use EBS\n volumes include ml.c5
and ml.p2
.
To look up instance types and their instance storage types and volumes, see Amazon EC2 Instance Types.
\nTo find the default local paths defined by the SageMaker training platform, see Amazon SageMaker\n Training Storage Folders for Training Datasets, Checkpoints, Model Artifacts, and\n Outputs.
", + "smithy.api#documentation": "The size of the ML storage volume that you want to provision.
\nML storage volumes store model artifacts and incremental states. Training\n algorithms might also use the ML storage volume for scratch space. If you want to store\n the training data in the ML storage volume, choose File
as the\n TrainingInputMode
in the algorithm specification.
When using an ML instance with NVMe SSD\n volumes, SageMaker doesn't provision Amazon EBS General Purpose SSD (gp2) storage.\n Available storage is fixed to the NVMe-type instance's storage capacity. SageMaker configures\n storage paths for training datasets, checkpoints, model artifacts, and outputs to use\n the entire capacity of the instance storage. For example, ML instance families with the\n NVMe-type instance storage include ml.p4d
, ml.g4dn
, and\n ml.g5
.
When using an ML instance with the EBS-only storage option and without instance\n storage, you must define the size of EBS volume through VolumeSizeInGB
in\n the ResourceConfig
API. For example, ML instance families that use EBS\n volumes include ml.c5
and ml.p2
.
To look up instance types and their instance storage types and volumes, see Amazon EC2 Instance Types.
\nTo find the default local paths defined by the SageMaker training platform, see Amazon SageMaker\n Training Storage Folders for Training Datasets, Checkpoints, Model Artifacts, and\n Outputs.
", "smithy.api#required": {} } }, "VolumeKmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "The Amazon Web Services KMS key that SageMaker uses to encrypt data on the storage volume\n attached to the ML compute instance(s) that run the training job.
\nCertain Nitro-based instances include local storage, dependent on the instance\n type. Local storage volumes are encrypted using a hardware module on the instance.\n You can't request a VolumeKmsKeyId
when using an instance type with\n local storage.
For a list of instance types that support local instance storage, see Instance Store Volumes.
\nFor more information about local instance storage encryption, see SSD\n Instance Store Volumes.
\nThe VolumeKmsKeyId
can be in any of the following formats:
// KMS Key ID
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// Amazon Resource Name (ARN) of a KMS Key
\n\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
The Amazon Web Services KMS key that SageMaker uses to encrypt data on the storage volume\n attached to the ML compute instance(s) that run the training job.
\nCertain Nitro-based instances include local storage, dependent on the instance\n type. Local storage volumes are encrypted using a hardware module on the instance.\n You can't request a VolumeKmsKeyId
when using an instance type with\n local storage.
For a list of instance types that support local instance storage, see Instance Store Volumes.
\nFor more information about local instance storage encryption, see SSD\n Instance Store Volumes.
\nThe VolumeKmsKeyId
can be in any of the following formats:
// KMS Key ID
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// Amazon Resource Name (ARN) of a KMS Key
\n\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
Describes the resources, including machine learning (ML) compute instances and ML\n storage volumes, to use for model training.
" + "smithy.api#documentation": "Describes the resources, including ML compute instances and ML storage volumes, to\n use for model training.
" } }, "com.amazonaws.sagemaker#ResourceConfigForUpdate": { @@ -44999,21 +44592,21 @@ "S3DataType": { "target": "com.amazonaws.sagemaker#S3DataType", "traits": { - "smithy.api#documentation": "If you choose S3Prefix
, S3Uri
identifies a key name prefix.\n SageMaker uses all objects that match the specified key name prefix for model training.
If you choose ManifestFile
, S3Uri
identifies an object that\n is a manifest file containing a list of object keys that you want SageMaker to use for model\n training.
If you choose AugmentedManifestFile
, S3Uri identifies an object that is\n an augmented manifest file in JSON lines format. This file contains the data you want to\n use for model training. AugmentedManifestFile
can only be used if the\n Channel's input mode is Pipe
.
If you choose S3Prefix
, S3Uri
identifies a key name prefix.\n SageMaker uses all objects that match the specified key name prefix for model training.
If you choose ManifestFile
, S3Uri
identifies an object that\n is a manifest file containing a list of object keys that you want SageMaker to use for model\n training.
If you choose AugmentedManifestFile
, S3Uri identifies an object that is\n an augmented manifest file in JSON lines format. This file contains the data you want to\n use for model training. AugmentedManifestFile
can only be used if the\n Channel's input mode is Pipe
.
Depending on the value specified for the S3DataType
, identifies either\n a key name prefix or a manifest. For example:
A key name prefix might look like this:\n s3://bucketname/exampleprefix
\n
A manifest might look like this:\n s3://bucketname/example.manifest
\n
A manifest is an S3 object which is a JSON file consisting of an array of\n elements. The first element is a prefix which is followed by one or more\n suffixes. SageMaker appends the suffix elements to the prefix to get a full set\n of S3Uri
. Note that the prefix must be a valid non-empty\n S3Uri
that precludes users from specifying a manifest whose\n individual S3Uri
is sourced from different S3 buckets.
The following code example shows a valid manifest format:
\n\n [ {\"prefix\": \"s3://customer_bucket/some/prefix/\"},
\n
\n \"relative/path/to/custdata-1\",
\n
\n \"relative/path/custdata-2\",
\n
\n ...
\n
\n \"relative/path/custdata-N\"
\n
\n ]
\n
This JSON is equivalent to the following S3Uri
\n list:
\n s3://customer_bucket/some/prefix/relative/path/to/custdata-1
\n
\n s3://customer_bucket/some/prefix/relative/path/custdata-2
\n
\n ...
\n
\n s3://customer_bucket/some/prefix/relative/path/custdata-N
\n
The complete set of S3Uri
in this manifest is the input data\n for the channel for this data source. The object that each S3Uri
\n points to must be readable by the IAM role that SageMaker uses to perform tasks on\n your behalf.
Depending on the value specified for the S3DataType
, identifies either\n a key name prefix or a manifest. For example:
A key name prefix might look like this:\n s3://bucketname/exampleprefix
\n
A manifest might look like this:\n s3://bucketname/example.manifest
\n
A manifest is an S3 object which is a JSON file consisting of an array of\n elements. The first element is a prefix which is followed by one or more\n suffixes. SageMaker appends the suffix elements to the prefix to get a full set\n of S3Uri
. Note that the prefix must be a valid non-empty\n S3Uri
that precludes users from specifying a manifest whose\n individual S3Uri
is sourced from different S3 buckets.
The following code example shows a valid manifest format:
\n\n [ {\"prefix\": \"s3://customer_bucket/some/prefix/\"},
\n
\n \"relative/path/to/custdata-1\",
\n
\n \"relative/path/custdata-2\",
\n
\n ...
\n
\n \"relative/path/custdata-N\"
\n
\n ]
\n
This JSON is equivalent to the following S3Uri
\n list:
\n s3://customer_bucket/some/prefix/relative/path/to/custdata-1
\n
\n s3://customer_bucket/some/prefix/relative/path/custdata-2
\n
\n ...
\n
\n s3://customer_bucket/some/prefix/relative/path/custdata-N
\n
The complete set of S3Uri
in this manifest is the input data\n for the channel for this data source. The object that each S3Uri
\n points to must be readable by the IAM role that SageMaker uses to perform tasks on\n your behalf.
If you want SageMaker to replicate the entire dataset on each ML compute instance that\n is launched for model training, specify FullyReplicated
.
If you want SageMaker to replicate a subset of data on each ML compute instance that is\n launched for model training, specify ShardedByS3Key
. If there are\n n ML compute instances launched for a training job, each\n instance gets approximately 1/n of the number of S3 objects. In\n this case, model training on each machine uses only the subset of training data.
Don't choose more ML compute instances for training than available S3 objects. If\n you do, some nodes won't get any data and you will pay for nodes that aren't getting any\n training data. This applies in both File and Pipe modes. Keep this in mind when\n developing algorithms.
\nIn distributed training, where you use multiple ML compute EC2 instances, you might\n choose ShardedByS3Key
. If the algorithm requires copying training data to\n the ML storage volume (when TrainingInputMode
is set to File
),\n this copies 1/n of the number of objects.
If you want SageMaker to replicate the entire dataset on each ML compute instance that\n is launched for model training, specify FullyReplicated
.
If you want SageMaker to replicate a subset of data on each ML compute instance that is\n launched for model training, specify ShardedByS3Key
. If there are\n n ML compute instances launched for a training job, each\n instance gets approximately 1/n of the number of S3 objects. In\n this case, model training on each machine uses only the subset of training data.
Don't choose more ML compute instances for training than available S3 objects. If\n you do, some nodes won't get any data and you will pay for nodes that aren't getting any\n training data. This applies in both File and Pipe modes. Keep this in mind when\n developing algorithms.
\nIn distributed training, where you use multiple ML compute EC2 instances, you might\n choose ShardedByS3Key
. If the algorithm requires copying training data to\n the ML storage volume (when TrainingInputMode
is set to File
),\n this copies 1/n of the number of objects.
Provides APIs for creating and managing SageMaker resources.
\nOther Resources:
\nProvides APIs for creating and managing SageMaker resources.
\nOther Resources:
\nA cron expression that describes details about the monitoring schedule.
\nCurrently the only supported cron expressions are:
\nIf you want to set the job to start every hour, please use the following:
\n\n Hourly: cron(0 * ? * * *)
\n
If you want to start the job daily:
\n\n cron(0 [00-23] ? * * *)
\n
For example, the following are valid cron expressions:
\nDaily at noon UTC: cron(0 12 ? * * *)
\n
Daily at midnight UTC: cron(0 0 ? * * *)
\n
To support running every 6, 12 hours, the following are also supported:
\n\n cron(0 [00-23]/[01-24] ? * * *)
\n
For example, the following are valid cron expressions:
\nEvery 12 hours, starting at 5pm UTC: cron(0 17/12 ? * * *)
\n
Every two hours starting at midnight: cron(0 0/2 ? * * *)
\n
Even though the cron expression is set to start at 5PM UTC, note that there\n could be a delay of 0-20 minutes from the actual requested time to run the\n execution.
\nWe recommend that if you would like a daily schedule, you do not provide this\n parameter. Amazon SageMaker will pick a time for running every day.
\nA cron expression that describes details about the monitoring schedule.
\n\nCurrently the only supported cron expressions are:
\nIf you want to set the job to start every hour, please use the following:
\n\n Hourly: cron(0 * ? * * *)
\n
If you want to start the job daily:
\n\n cron(0 [00-23] ? * * *)
\n
For example, the following are valid cron expressions:
\nDaily at noon UTC: cron(0 12 ? * * *)
\n
Daily at midnight UTC: cron(0 0 ? * * *)
\n
To support running every 6, 12 hours, the following are also supported:
\n\n cron(0 [00-23]/[01-24] ? * * *)
\n
For example, the following are valid cron expressions:
\nEvery 12 hours, starting at 5pm UTC: cron(0 17/12 ? * * *)
\n
Every two hours starting at midnight: cron(0 0/2 ? * * *)
\n
Even though the cron expression is set to start at 5PM UTC, note that there\n could be a delay of 0-20 minutes from the actual requested time to run the\n execution.
\nWe recommend that if you would like a daily schedule, you do not provide this\n parameter. Amazon SageMaker will pick a time for running every day.
\nContains a secondary status information from a training\n job.
\nStatus might be one of the following secondary statuses:
\n\n Starting
\n - Starting the training job.
\n Downloading
- An optional stage for algorithms that\n support File
training input mode. It indicates that\n data is being downloaded to the ML storage volumes.
\n Training
- Training is in progress.
\n Uploading
- Training is complete and the model\n artifacts are being uploaded to the S3 location.
\n Completed
- The training job has completed.
\n Failed
- The training job has failed. The reason for\n the failure is returned in the FailureReason
field of\n DescribeTrainingJobResponse
.
\n MaxRuntimeExceeded
- The job stopped because it\n exceeded the maximum allowed runtime.
\n Stopped
- The training job has stopped.
\n Stopping
- Stopping the training job.
We no longer support the following secondary statuses:
\n\n LaunchingMLInstances
\n
\n PreparingTrainingStack
\n
\n DownloadingTrainingImage
\n
Contains a secondary status information from a training\n job.
\nStatus might be one of the following secondary statuses:
\n\n Starting
\n - Starting the training job.
\n Downloading
- An optional stage for algorithms that\n support File
training input mode. It indicates that\n data is being downloaded to the ML storage volumes.
\n Training
- Training is in progress.
\n Uploading
- Training is complete and the model\n artifacts are being uploaded to the S3 location.
\n Completed
- The training job has completed.
\n Failed
- The training job has failed. The reason for\n the failure is returned in the FailureReason
field of\n DescribeTrainingJobResponse
.
\n MaxRuntimeExceeded
- The job stopped because it\n exceeded the maximum allowed runtime.
\n Stopped
- The training job has stopped.
\n Stopping
- Stopping the training job.
We no longer support the following secondary statuses:
\n\n LaunchingMLInstances
\n
\n PreparingTrainingStack
\n
\n DownloadingTrainingImage
\n
A detailed description of the progress within a secondary status.\n
\nSageMaker provides secondary statuses and status messages that apply to each of\n them:
\nStarting the training job.
\nLaunching requested ML\n instances.
\nInsufficient\n capacity error from EC2 while launching instances,\n retrying!
\nLaunched\n instance was unhealthy, replacing it!
\nPreparing the instances for training.
\nDownloading the training image.
\nTraining\n image download completed. Training in\n progress.
\nStatus messages are subject to change. Therefore, we recommend not including them\n in code that programmatically initiates actions. For examples, don't use status\n messages in if statements.
\nTo have an overview of your training job's progress, view\n TrainingJobStatus
and SecondaryStatus
in DescribeTrainingJob, and StatusMessage
together. For\n example, at the start of a training job, you might see the following:
\n TrainingJobStatus
- InProgress
\n SecondaryStatus
- Training
\n StatusMessage
- Downloading the training image
A detailed description of the progress within a secondary status.\n
\nSageMaker provides secondary statuses and status messages that apply to each of\n them:
\nStarting the training job.
\nLaunching requested ML\n instances.
\nInsufficient\n capacity error from EC2 while launching instances,\n retrying!
\nLaunched\n instance was unhealthy, replacing it!
\nPreparing the instances for training.
\nDownloading the training image.
\nTraining\n image download completed. Training in\n progress.
\nStatus messages are subject to change. Therefore, we recommend not including them\n in code that programmatically initiates actions. For examples, don't use status\n messages in if statements.
\nTo have an overview of your training job's progress, view\n TrainingJobStatus
and SecondaryStatus
in DescribeTrainingJob, and StatusMessage
together. For\n example, at the start of a training job, you might see the following:
\n TrainingJobStatus
- InProgress
\n SecondaryStatus
- Training
\n StatusMessage
- Downloading the training image
An array element of DescribeTrainingJobResponse$SecondaryStatusTransitions. It provides\n additional details about a status that the training job has transitioned through. A\n training job can be in one of several states, for example, starting, downloading,\n training, or uploading. Within each state, there are a number of intermediate states.\n For example, within the starting state, SageMaker could be starting the training job or\n launching the ML instances. These transitional states are referred to as the job's\n secondary\n status.\n
\n " + "smithy.api#documentation": "An array element of DescribeTrainingJobResponse$SecondaryStatusTransitions. It provides\n additional details about a status that the training job has transitioned through. A\n training job can be in one of several states, for example, starting, downloading,\n training, or uploading. Within each state, there are a number of intermediate states.\n For example, within the starting state, SageMaker could be starting the training job or\n launching the ML instances. These transitional states are referred to as the job's\n secondary\n status.\n
\n " } }, "com.amazonaws.sagemaker#SecondaryStatusTransitions": { @@ -48592,7 +48172,7 @@ "ProvisionedProductStatusMessage": { "target": "com.amazonaws.sagemaker#ProvisionedProductStatusMessage", "traits": { - "smithy.api#documentation": "The current status of the product.
\n\n AVAILABLE
- Stable state, ready to perform any operation. The most recent operation succeeded and completed.
\n UNDER_CHANGE
- Transitive state. Operations performed might not have valid results. Wait for an AVAILABLE status before performing operations.
\n TAINTED
- Stable state, ready to perform any operation. The stack has completed the requested operation but is not exactly what was requested. For example, a request to update to a new version failed and the stack rolled back to the current version.
\n ERROR
- An unexpected error occurred. The provisioned product exists but the stack is not running. For example, CloudFormation received a parameter value that was not valid and could not launch the stack.
\n PLAN_IN_PROGRESS
- Transitive state. The plan operations were performed to provision a new product, but resources have not yet been created. After reviewing the list of resources to be created, execute the plan. Wait for an AVAILABLE status before performing operations.
The current status of the product.
\n\n AVAILABLE
- Stable state, ready to perform any operation. The most recent operation succeeded and completed.
\n UNDER_CHANGE
- Transitive state. Operations performed might not have valid results. Wait for an AVAILABLE status before performing operations.
\n TAINTED
- Stable state, ready to perform any operation. The stack has completed the requested operation but is not exactly what was requested. For example, a request to update to a new version failed and the stack rolled back to the current version.
\n ERROR
- An unexpected error occurred. The provisioned product exists but the stack is not running. For example, CloudFormation received a parameter value that was not valid and could not launch the stack.
\n PLAN_IN_PROGRESS
- Transitive state. The plan operations were performed to provision a new product, but resources have not yet been created. After reviewing the list of resources to be created, execute the plan. Wait for an AVAILABLE status before performing operations.
\n The name of the production variant, which takes all the inference requests.\n
", + "smithy.api#documentation": "\n The name of the production variant.\n
", "smithy.api#required": {} } }, @@ -48681,7 +48261,7 @@ } }, "traits": { - "smithy.api#documentation": "\n The configuration of ShadowMode
inference experiment type, which specifies a production variant\n to take all the inference requests, and a shadow variant to which Amazon SageMaker replicates a percentage of the\n inference requests. For the shadow variant it also specifies the percentage of requests that Amazon SageMaker replicates.\n
\n Shows which variant is a production variant and which variant is a shadow variant. For shadow\n variants, also shows the sampling percentages.\n
" } }, "com.amazonaws.sagemaker#ShadowModelVariantConfig": { @@ -48698,7 +48278,7 @@ "target": "com.amazonaws.sagemaker#Percentage", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "\n The percentage of inference requests that Amazon SageMaker replicates from the production variant to the shadow variant.\n
", + "smithy.api#documentation": "The percentage of inference requests that are replicated to the shadow variant.
", "smithy.api#required": {} } } @@ -48758,7 +48338,7 @@ } }, "traits": { - "smithy.api#documentation": "A configuration for a shuffle option for input data in a channel. If you use\n S3Prefix
for S3DataType
, the results of the S3 key prefix\n matches are shuffled. If you use ManifestFile
, the order of the S3 object\n references in the ManifestFile
is shuffled. If you use\n AugmentedManifestFile
, the order of the JSON lines in the\n AugmentedManifestFile
is shuffled. The shuffling order is determined\n using the Seed
value.
For Pipe input mode, when ShuffleConfig
is specified shuffling is done at\n the start of every epoch. With large datasets, this ensures that the order of the\n training data is different for each epoch, and it helps reduce bias and possible\n overfitting. In a multi-node training job when ShuffleConfig
is combined\n with S3DataDistributionType
of ShardedByS3Key
, the data is\n shuffled across nodes so that the content sent to a particular node on the first epoch\n might be sent to a different node on the second epoch.
A configuration for a shuffle option for input data in a channel. If you use\n S3Prefix
for S3DataType
, the results of the S3 key prefix\n matches are shuffled. If you use ManifestFile
, the order of the S3 object\n references in the ManifestFile
is shuffled. If you use\n AugmentedManifestFile
, the order of the JSON lines in the\n AugmentedManifestFile
is shuffled. The shuffling order is determined\n using the Seed
value.
For Pipe input mode, when ShuffleConfig
is specified shuffling is done at\n the start of every epoch. With large datasets, this ensures that the order of the\n training data is different for each epoch, and it helps reduce bias and possible\n overfitting. In a multi-node training job when ShuffleConfig
is combined\n with S3DataDistributionType
of ShardedByS3Key
, the data is\n shuffled across nodes so that the content sent to a particular node on the first epoch\n might be sent to a different node on the second epoch.
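A hedged Kotlin sketch of the seeded, per-epoch shuffle behavior described above, reproduced on a plain list of object keys.

```kotlin
import kotlin.random.Random

// Illustrative only: a deterministic, seed-driven shuffle that changes every epoch.
fun shuffleForEpoch(keys: List<String>, seed: Long, epoch: Int): List<String> =
    keys.shuffled(Random(seed + epoch)) // same seed and epoch => same order

fun main() {
    val keys = listOf("custdata-1", "custdata-2", "custdata-3", "custdata-4")
    println(shuffleForEpoch(keys, seed = 42L, epoch = 0))
    println(shuffleForEpoch(keys, seed = 42L, epoch = 1)) // different order on the next epoch
}
```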
The Amazon S3 path where the model artifacts, which result from model training, are stored.\n This path must point to a single gzip
compressed tar archive\n (.tar.gz
suffix).
The model artifacts must be in an S3 bucket that is in the same region as the\n algorithm.
\nThe Amazon S3 path where the model artifacts, which result from model training, are stored.\n This path must point to a single gzip
compressed tar archive\n (.tar.gz
suffix).
The model artifacts must be in an S3 bucket that is in the same region as the\n algorithm.
\nA list of one to ten Classless Inter-Domain Routing (CIDR) values.
\nMaximum: Ten CIDR values
\nThe following Length Constraints apply to individual CIDR values in\n the CIDR value list.
\nA list of one to ten Classless Inter-Domain Routing (CIDR) values.
\nMaximum: Ten CIDR values
\nThe following Length Constraints apply to individual CIDR values in\n the CIDR value list.
\nStops a model compilation job.
\nTo stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal. This gracefully shuts the\n job down. If the job hasn't stopped, it sends the SIGKILL signal.
\nWhen it receives a StopCompilationJob
request, Amazon SageMaker changes the CompilationJobSummary$CompilationJobStatus of the job to\n Stopping
. After Amazon SageMaker stops the job, it sets the CompilationJobSummary$CompilationJobStatus to Stopped
.\n
Stops a model compilation job.
\nTo stop a job, Amazon SageMaker sends the algorithm the SIGTERM signal. This gracefully shuts the\n job down. If the job hasn't stopped, it sends the SIGKILL signal.
\nWhen it receives a StopCompilationJob
request, Amazon SageMaker changes the CompilationJobSummary$CompilationJobStatus of the job to\n Stopping
. After Amazon SageMaker stops the job, it sets the CompilationJobSummary$CompilationJobStatus to Stopped
.\n
Stops a running hyperparameter tuning job and all running training jobs that the\n tuning job launched.
\nAll model artifacts output from the training jobs are stored in Amazon Simple Storage Service (Amazon S3). All\n data that the training jobs write to Amazon CloudWatch Logs are still available in CloudWatch. After the\n tuning job moves to the Stopped
state, it releases all\n reserved\n resources for the tuning job.
Stops a running hyperparameter tuning job and all running training jobs that the\n tuning job launched.
\nAll model artifacts output from the training jobs are stored in Amazon Simple Storage Service (Amazon S3). All\n data that the training jobs write to Amazon CloudWatch Logs are still available in CloudWatch. After the\n tuning job moves to the Stopped
state, it releases all\n reserved\n resources for the tuning job.
\n Array of key-value pairs, with names of variants mapped to actions. The possible actions are the following:\n
\n\n Promote
- Promote the shadow variant to a production variant
\n Remove
- Delete the variant
\n Retain
- Keep the variant as it is
\n Array of key-value pairs, with names of variants mapped to actions. The possible actions are the following:\n
\n\n Promote
- Promote the shadow variant to a production variant
\n Remove
- Delete the variant
\n Retain
- Keep the variant as it is
\n An array of ModelVariantConfig
objects. There is one for each variant that you want to deploy\n after the inference experiment stops. Each ModelVariantConfig
describes the infrastructure\n configuration for deploying the corresponding variant.\n
\n Array of ModelVariantConfig
objects. There is one for each variant that you want to deploy\n after the inference experiment stops. Each ModelVariantConfig
describes the infrastructure\n configuration for deploying the corresponding variant.\n
\n The desired state of the experiment after stopping. The possible states are the following:\n
\n\n Completed
: The experiment completed successfully
\n Cancelled
: The experiment was canceled
\n The desired state of the experiment after stopping. The possible states are the following:\n
\n\n Completed
: The experiment completed successfully
\n Cancelled
: The experiment was canceled
Terminates the ML compute instance. Before terminating the instance, SageMaker\n disconnects the ML storage volume from it. SageMaker preserves the ML storage volume. SageMaker\n stops charging you for the ML compute instance when you call\n StopNotebookInstance
.
To access data on the ML storage volume for a notebook instance that has been\n terminated, call the StartNotebookInstance
API.\n StartNotebookInstance
launches another ML compute instance, configures\n it, and attaches the preserved ML storage volume so you can continue your work.\n
Terminates the ML compute instance. Before terminating the instance, SageMaker\n disconnects the ML storage volume from it. SageMaker preserves the ML storage volume. SageMaker\n stops charging you for the ML compute instance when you call\n StopNotebookInstance
.
To access data on the ML storage volume for a notebook instance that has been\n terminated, call the StartNotebookInstance
API.\n StartNotebookInstance
launches another ML compute instance, configures\n it, and attaches the preserved ML storage volume so you can continue your work.\n
Stops a pipeline execution.
\n\n Callback Step\n
\nA pipeline execution won't stop while a callback step is running.\n When you call StopPipelineExecution
\n on a pipeline execution with a running callback step, SageMaker Pipelines sends an\n additional Amazon SQS message to the specified SQS queue. The body of the SQS message\n contains a \"Status\" field which is set to \"Stopping\".
You should add logic to your Amazon SQS message consumer to take any needed action (for\n example, resource cleanup) upon receipt of the message followed by a call to\n SendPipelineExecutionStepSuccess
or\n SendPipelineExecutionStepFailure
.
Only when SageMaker Pipelines receives one of these calls will it stop the pipeline execution.
\n\n Lambda Step\n
\nA pipeline execution can't be stopped while a lambda step is running because the Lambda\n function invoked by the lambda step can't be stopped. If you attempt to stop the execution\n while the Lambda function is running, the pipeline waits for the Lambda function to finish\n or until the timeout is hit, whichever occurs first, and then stops. If the Lambda function\n finishes, the pipeline execution status is Stopped
. If the timeout is hit\n the pipeline execution status is Failed
.
Stops a pipeline execution.
\n\n\n Callback Step\n
\nA pipeline execution won't stop while a callback step is running.\n When you call StopPipelineExecution
\n on a pipeline execution with a running callback step, SageMaker Pipelines sends an\n additional Amazon SQS message to the specified SQS queue. The body of the SQS message\n contains a \"Status\" field which is set to \"Stopping\".
You should add logic to your Amazon SQS message consumer to take any needed action (for\n example, resource cleanup) upon receipt of the message followed by a call to\n SendPipelineExecutionStepSuccess
or\n SendPipelineExecutionStepFailure
.
Only when SageMaker Pipelines receives one of these calls will it stop the pipeline execution.
\n\n\n Lambda Step\n
\nA pipeline execution can't be stopped while a lambda step is running because the Lambda\n function invoked by the lambda step can't be stopped. If you attempt to stop the execution\n while the Lambda function is running, the pipeline waits for the Lambda function to finish\n or until the timeout is hit, whichever occurs first, and then stops. If the Lambda function\n finishes, the pipeline execution status is Stopped
. If the timeout is hit,\n the pipeline execution status is Failed
.
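As a rough sketch only: stopping an execution and, for a running callback step, completing it from the SQS consumer with the callback token taken from the "Stopping" message. The ARN, token, and region are placeholders, and the builder names are assumed from the generated Kotlin client:

```kotlin
import aws.sdk.kotlin.services.sagemaker.SageMakerClient
import aws.sdk.kotlin.services.sagemaker.model.SendPipelineExecutionStepSuccessRequest
import aws.sdk.kotlin.services.sagemaker.model.StopPipelineExecutionRequest
import java.util.UUID
import kotlinx.coroutines.runBlocking

fun main() = runBlocking {
    val sm = SageMakerClient.fromEnvironment { region = "us-west-2" } // region is an assumption
    try {
        // Ask SageMaker Pipelines to stop the execution (idempotent per client token).
        sm.stopPipelineExecution(StopPipelineExecutionRequest {
            pipelineExecutionArn =
                "arn:aws:sagemaker:us-west-2:111122223333:pipeline/my-pipeline/execution/abc123" // hypothetical ARN
            clientRequestToken = UUID.randomUUID().toString()
        })

        // If a callback step is running, your SQS consumer sees a message whose body
        // carries "Status": "Stopping". After cleanup, report the step outcome using
        // the callback token from that message so the execution can actually stop.
        val tokenFromSqs = "token-from-sqs-message" // placeholder
        sm.sendPipelineExecutionStepSuccess(SendPipelineExecutionStepSuccessRequest {
            callbackToken = tokenFromSqs
        })
    } finally {
        sm.close()
    }
}
```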
Stops a training job. To stop a job, SageMaker sends the algorithm the\n SIGTERM
signal, which delays job termination for 120 seconds.\n Algorithms might use this 120-second window to save the model artifacts, so the results\n of the training is not lost.
When it receives a StopTrainingJob
request, SageMaker changes the status of\n the job to Stopping
. After SageMaker stops the job, it sets the status to\n Stopped
.
Stops a training job. To stop a job, SageMaker sends the algorithm the\n SIGTERM
signal, which delays job termination for 120 seconds.\n Algorithms might use this 120-second window to save the model artifacts, so the results\n of the training are not lost.
When it receives a StopTrainingJob
request, SageMaker changes the status of\n the job to Stopping
. After SageMaker stops the job, it sets the status to\n Stopped
.
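A hedged sketch of the stop-and-poll pattern this describes, using the generated Kotlin client; the job name, region, and polling interval are placeholders:

```kotlin
import aws.sdk.kotlin.services.sagemaker.SageMakerClient
import aws.sdk.kotlin.services.sagemaker.model.DescribeTrainingJobRequest
import aws.sdk.kotlin.services.sagemaker.model.StopTrainingJobRequest
import aws.sdk.kotlin.services.sagemaker.model.TrainingJobStatus
import kotlinx.coroutines.delay
import kotlinx.coroutines.runBlocking

fun main() = runBlocking {
    val sm = SageMakerClient.fromEnvironment { region = "us-west-2" } // region is an assumption
    try {
        val jobName = "my-training-job" // hypothetical job name

        // The algorithm receives SIGTERM and has a 120-second window to persist artifacts.
        sm.stopTrainingJob(StopTrainingJobRequest { trainingJobName = jobName })

        // The status moves to Stopping and then Stopped.
        var status: TrainingJobStatus? = null
        do {
            delay(30_000)
            status = sm.describeTrainingJob(DescribeTrainingJobRequest {
                trainingJobName = jobName
            }).trainingJobStatus
            println("training job status: $status")
        } while (status == TrainingJobStatus.Stopping)
    } finally {
        sm.close()
    }
}
```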
Stops a batch transform job.
\nWhen Amazon SageMaker receives a StopTransformJob
request, the status of the job\n changes to Stopping
. After Amazon SageMaker\n stops\n the job, the status is set to Stopped
. When you stop a batch transform job before\n it is completed, Amazon SageMaker doesn't store the job's output in Amazon S3.
Stops a batch transform job.
\nWhen Amazon SageMaker receives a StopTransformJob
request, the status of the job\n changes to Stopping
. After Amazon SageMaker\n stops\n the job, the status is set to Stopped
. When you stop a batch transform job before\n it is completed, Amazon SageMaker doesn't store the job's output in Amazon S3.
The maximum length of time, in seconds, that a training or compilation job can run\n before it is stopped.
\nFor compilation jobs, if the job does not complete during this time, a\n TimeOut
error is generated. We recommend starting with 900 seconds and\n increasing as necessary based on your model.
For all other jobs, if the job does not complete during this time, SageMaker ends the job.\n When RetryStrategy
is specified in the job request,\n MaxRuntimeInSeconds
specifies the maximum time for all of the attempts\n in total, not each individual attempt. The default value is 1 day. The maximum value is\n 28 days.
The maximum time that a TrainingJob
can run in total, including any time\n spent publishing metrics or archiving and uploading models after it has been stopped, is\n 30 days.
The maximum length of time, in seconds, that a training or compilation job can run\n before it is stopped.
\nFor compilation jobs, if the job does not complete during this time, a\n TimeOut
error is generated. We recommend starting with 900 seconds and\n increasing as necessary based on your model.
For all other jobs, if the job does not complete during this time, SageMaker ends the job.\n When RetryStrategy
is specified in the job request,\n MaxRuntimeInSeconds
specifies the maximum time for all of the attempts\n in total, not each individual attempt. The default value is 1 day. The maximum value is\n 28 days.
The maximum time that a TrainingJob
can run in total, including any time\n spent publishing metrics or archiving and uploading models after it has been stopped, is\n 30 days.
The maximum length of time, in seconds, that a managed Spot training job has to\n complete. It is the amount of time spent waiting for Spot capacity plus the amount of\n time the job can run. It must be equal to or greater than\n MaxRuntimeInSeconds
. If the job does not complete during this time,\n SageMaker ends the job.
When RetryStrategy
is specified in the job request,\n MaxWaitTimeInSeconds
specifies the maximum time for all of the attempts\n in total, not each individual attempt.
The maximum length of time, in seconds, that a managed Spot training job has to\n complete. It is the amount of time spent waiting for Spot capacity plus the amount of\n time the job can run. It must be equal to or greater than\n MaxRuntimeInSeconds
. If the job does not complete during this time,\n SageMaker ends the job.
When RetryStrategy
is specified in the job request,\n MaxWaitTimeInSeconds
specifies the maximum time for all of the attempts\n in total, not each individual attempt.
Specifies a limit to how long a model training job or model compilation job can run.\n It also specifies how long a managed spot training job has to complete. When the job\n reaches the time limit, SageMaker ends the training or compilation job. Use this API to cap\n model training costs.
\nTo stop a training job, SageMaker sends the algorithm the SIGTERM
signal,\n which delays job termination for 120 seconds. Algorithms can use this 120-second window\n to save the model artifacts, so the results of training are not lost.
The training algorithms provided by SageMaker automatically save the intermediate results\n of a model training job when possible. This attempt to save artifacts is only a best\n effort case as model might not be in a state from which it can be saved. For example, if\n training has just started, the model might not be ready to save. When saved, this\n intermediate data is a valid model artifact. You can use it to create a model with\n CreateModel
.
The Neural Topic Model (NTM) currently does not support saving intermediate model\n artifacts. When training NTMs, make sure that the maximum runtime is sufficient for\n the training job to complete.
\nSpecifies a limit to how long a model training job or model compilation job can run.\n It also specifies how long a managed spot training job has to complete. When the job\n reaches the time limit, SageMaker ends the training or compilation job. Use this API to cap\n model training costs.
\nTo stop a training job, SageMaker sends the algorithm the SIGTERM
signal,\n which delays job termination for 120 seconds. Algorithms can use this 120-second window\n to save the model artifacts, so the results of training are not lost.
The training algorithms provided by SageMaker automatically save the intermediate results\n of a model training job when possible. This attempt to save artifacts is only a best\n effort, as the model might not be in a state from which it can be saved. For example, if\n training has just started, the model might not be ready to save. When saved, this\n intermediate data is a valid model artifact. You can use it to create a model with\n CreateModel
.
The Neural Topic Model (NTM) currently does not support saving intermediate model\n artifacts. When training NTMs, make sure that the maximum runtime is sufficient for\n the training job to complete.
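For illustration, a StoppingCondition for a managed Spot training job might be built like this with the Kotlin model types generated from this file; the concrete limits are arbitrary examples:

```kotlin
import aws.sdk.kotlin.services.sagemaker.model.StoppingCondition

// Cap a managed Spot training job: at most one hour of actual run time, and at most
// two hours in total for Spot waiting plus run time.
// MaxWaitTimeInSeconds must be greater than or equal to MaxRuntimeInSeconds.
val stoppingCondition = StoppingCondition {
    maxRuntimeInSeconds = 3_600
    maxWaitTimeInSeconds = 7_200
}
```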
\nA tag object that consists of a key and an optional value, used to manage metadata\n for SageMaker Amazon Web Services resources.
\nYou can add tags to notebook instances, training jobs, hyperparameter tuning jobs,\n batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and\n endpoints. For more information on adding tags to SageMaker resources, see AddTags.
\nFor more information on adding metadata to your Amazon Web Services resources with\n tagging, see Tagging Amazon Web Services resources. For advice on best practices for\n managing Amazon Web Services resources with tagging, see Tagging\n Best Practices: Implement an Effective Amazon Web Services Resource Tagging\n Strategy.
" + "smithy.api#documentation": "A tag object that consists of a key and an optional value, used to manage metadata\n for SageMaker Amazon Web Services resources.
\nYou can add tags to notebook instances, training jobs, hyperparameter tuning jobs,\n batch transform jobs, models, labeling jobs, work teams, endpoint configurations, and\n endpoints. For more information on adding tags to SageMaker resources, see AddTags.
\nFor more information on adding metadata to your Amazon Web Services resources with\n tagging, see Tagging Amazon Web Services resources. For advice on best practices for\n managing Amazon Web Services resources with tagging, see Tagging\n Best Practices: Implement an Effective Amazon Web Services Resource Tagging\n Strategy.
" } }, "com.amazonaws.sagemaker#TagKey": { @@ -50719,21 +50299,21 @@ "Os": { "target": "com.amazonaws.sagemaker#TargetPlatformOs", "traits": { - "smithy.api#documentation": "Specifies a target platform OS.
\n\n LINUX
: Linux-based operating systems.
\n ANDROID
: Android operating systems. Android API level can be\n specified using the ANDROID_PLATFORM
compiler option. For example,\n \"CompilerOptions\": {'ANDROID_PLATFORM': 28}
\n
Specifies a target platform OS.
\n\n LINUX
: Linux-based operating systems.
\n ANDROID
: Android operating systems. Android API level can be\n specified using the ANDROID_PLATFORM
compiler option. For example,\n \"CompilerOptions\": {'ANDROID_PLATFORM': 28}
\n
Specifies a target platform architecture.
\n\n X86_64
: 64-bit version of the x86 instruction set.
\n X86
: 32-bit version of the x86 instruction set.
\n ARM64
: ARMv8 64-bit CPU.
\n ARM_EABIHF
: ARMv7 32-bit, Hard Float.
\n ARM_EABI
: ARMv7 32-bit, Soft Float. Used by Android 32-bit ARM\n platform.
Specifies a target platform architecture.
\n\n X86_64
: 64-bit version of the x86 instruction set.
\n X86
: 32-bit version of the x86 instruction set.
\n ARM64
: ARMv8 64-bit CPU.
\n ARM_EABIHF
: ARMv7 32-bit, Hard Float.
\n ARM_EABI
: ARMv7 32-bit, Soft Float. Used by Android 32-bit ARM\n platform.
Specifies a target platform accelerator (optional).
\n\n NVIDIA
: Nvidia graphics processing unit. It also requires\n gpu-code
, trt-ver
, cuda-ver
compiler\n options
\n MALI
: ARM Mali graphics processor
\n INTEL_GRAPHICS
: Integrated Intel graphics
Specifies a target platform accelerator (optional).
\n\n NVIDIA
: Nvidia graphics processing unit. It also requires\n gpu-code
, trt-ver
, cuda-ver
compiler\n options
\n MALI
: ARM Mali graphics processor
\n INTEL_GRAPHICS
: Integrated Intel graphics
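Putting the OS, architecture, and optional accelerator members together, a compilation-job output configuration for an Android ARM64 device could look roughly like the following in the generated Kotlin client; the bucket and API level are placeholders:

```kotlin
import aws.sdk.kotlin.services.sagemaker.model.OutputConfig
import aws.sdk.kotlin.services.sagemaker.model.TargetPlatform
import aws.sdk.kotlin.services.sagemaker.model.TargetPlatformArch
import aws.sdk.kotlin.services.sagemaker.model.TargetPlatformOs

// Compile for a 64-bit ARM Android device; the Android API level travels in CompilerOptions.
val outputConfig = OutputConfig {
    s3OutputLocation = "s3://amzn-s3-demo-bucket/compiled/" // hypothetical bucket
    targetPlatform = TargetPlatform {
        os = TargetPlatformOs.Android
        arch = TargetPlatformArch.Arm64
        // accelerator (for example TargetPlatformAccelerator.Nvidia) is optional and, for
        // NVIDIA, also requires the gpu-code, trt-ver, and cuda-ver compiler options.
    }
    compilerOptions = """{"ANDROID_PLATFORM": 28}"""
}
```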
Traffic routing strategy type.
\n\n ALL_AT_ONCE
: Endpoint traffic shifts to the new fleet\n in a single step.\n
\n CANARY
: Endpoint traffic shifts to the new fleet\n in two steps. The first step is the canary, which is a small portion of the traffic. The\n second step is the remainder of the traffic.\n
\n LINEAR
: Endpoint traffic shifts to the new fleet in\n n steps of a configurable size.\n
Traffic routing strategy type.
\n\n ALL_AT_ONCE
: Endpoint traffic shifts to the new fleet\n in a single step.\n
\n CANARY
: Endpoint traffic shifts to the new fleet\n in two steps. The first step is the canary, which is a small portion of the traffic. The\n second step is the remainder of the traffic.\n
\n LINEAR
: Endpoint traffic shifts to the new fleet in\n n steps of a configurable size.\n
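As an illustrative sketch, a canary rollout built from these values might be expressed like this with the generated Kotlin model types (shape and enum spellings assumed from the model; the sizes and wait times are arbitrary):

```kotlin
import aws.sdk.kotlin.services.sagemaker.model.BlueGreenUpdatePolicy
import aws.sdk.kotlin.services.sagemaker.model.CapacitySize
import aws.sdk.kotlin.services.sagemaker.model.CapacitySizeType
import aws.sdk.kotlin.services.sagemaker.model.DeploymentConfig
import aws.sdk.kotlin.services.sagemaker.model.TrafficRoutingConfig
import aws.sdk.kotlin.services.sagemaker.model.TrafficRoutingConfigType

// Shift 10% of traffic to the new fleet first, wait five minutes, then shift the rest.
val deploymentConfig = DeploymentConfig {
    blueGreenUpdatePolicy = BlueGreenUpdatePolicy {
        trafficRoutingConfiguration = TrafficRoutingConfig {
            type = TrafficRoutingConfigType.Canary
            canarySize = CapacitySize {
                type = CapacitySizeType.CapacityPercent
                value = 10
            }
            waitIntervalInSeconds = 300
        }
        terminationWaitInSeconds = 120
    }
}
```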
The training input mode that the algorithm supports. For more information about input\n modes, see Algorithms.
\n\n Pipe mode\n
\nIf an algorithm supports Pipe
mode, Amazon SageMaker streams data directly from\n Amazon S3 to the container.
\n File mode\n
\nIf an algorithm supports File
mode, SageMaker downloads the training data from\n S3 to the provisioned ML storage volume, and mounts the directory to the Docker volume\n for the training container.
You must provision the ML storage volume with sufficient capacity to accommodate the\n data downloaded from S3. In addition to the training data, the ML storage volume also\n stores the output model. The algorithm container uses the ML storage volume to also\n store intermediate information, if any.
\nFor distributed algorithms, training data is distributed uniformly. Your training\n duration is predictable if the input data objects sizes are approximately the same. SageMaker\n does not split the files any further for model training. If the object sizes are skewed,\n training won't be optimal as the data distribution is also skewed when one host in a\n training cluster is overloaded, thus becoming a bottleneck in training.
\n\n FastFile mode\n
\nIf an algorithm supports FastFile
mode, SageMaker streams data directly from\n S3 to the container with no code changes, and provides file system access to the data.\n Users can author their training script to interact with these files as if they were\n stored on disk.
\n FastFile
mode works best when the data is read sequentially. Augmented\n manifest files aren't supported. The startup time is lower when there are fewer files in\n the S3 bucket provided.
The training input mode that the algorithm supports. For more information about input\n modes, see Algorithms.
\n\n\n Pipe mode\n
\nIf an algorithm supports Pipe
mode, Amazon SageMaker streams data directly from\n Amazon S3 to the container.
\n File mode\n
\nIf an algorithm supports File
mode, SageMaker downloads the training data from\n S3 to the provisioned ML storage volume, and mounts the directory to the Docker volume\n for the training container.
You must provision the ML storage volume with sufficient capacity to accommodate the\n data downloaded from S3. In addition to the training data, the ML storage volume also\n stores the output model. The algorithm container uses the ML storage volume to also\n store intermediate information, if any.
\nFor distributed algorithms, training data is distributed uniformly. Your training\n duration is predictable if the input data object sizes are approximately the same. SageMaker\n does not split the files any further for model training. If the object sizes are skewed,\n training won't be optimal, because the data distribution is also skewed and the overloaded\n host in the training cluster becomes a bottleneck in training.
\n\n\n FastFile mode\n
\nIf an algorithm supports FastFile
mode, SageMaker streams data directly from\n S3 to the container with no code changes, and provides file system access to the data.\n Users can author their training script to interact with these files as if they were\n stored on disk.
\n FastFile
mode works best when the data is read sequentially. Augmented\n manifest files aren't supported. The startup time is lower when there are fewer files in\n the S3 bucket provided.
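A short, assumption-laden sketch of selecting FastFile mode on an input channel with the generated Kotlin model types; the channel name and S3 prefix are placeholders:

```kotlin
import aws.sdk.kotlin.services.sagemaker.model.Channel
import aws.sdk.kotlin.services.sagemaker.model.DataSource
import aws.sdk.kotlin.services.sagemaker.model.S3DataSource
import aws.sdk.kotlin.services.sagemaker.model.S3DataType
import aws.sdk.kotlin.services.sagemaker.model.TrainingInputMode

// FastFile mode streams objects from S3 on demand while still exposing them as files,
// so nothing is copied to the ML storage volume up front.
val trainChannel = Channel {
    channelName = "train"
    inputMode = TrainingInputMode.FastFile
    dataSource = DataSource {
        s3DataSource = S3DataSource {
            s3DataType = S3DataType.S3Prefix
            s3Uri = "s3://amzn-s3-demo-bucket/training-data/" // hypothetical prefix
        }
    }
}
```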
The status of the\n training\n job.
\nTraining job statuses are:
\n\n InProgress
- The training is in progress.
\n Completed
- The training job has completed.
\n Failed
- The training job has failed. To see the reason for the\n failure, see the FailureReason
field in the response to a\n DescribeTrainingJobResponse
call.
\n Stopping
- The training job is stopping.
\n Stopped
- The training job has stopped.
For\n more detailed information, see SecondaryStatus
.
The status of the\n training\n job.
\nTraining job statuses are:
\n\n InProgress
- The training is in progress.
\n Completed
- The training job has completed.
\n Failed
- The training job has failed. To see the reason for the\n failure, see the FailureReason
field in the response to a\n DescribeTrainingJobResponse
call.
\n Stopping
- The training job is stopping.
\n Stopped
- The training job has stopped.
For\n more detailed information, see SecondaryStatus
.
Provides detailed information about the state of the training job. For detailed\n information about the secondary status of the training job, see\n StatusMessage
under SecondaryStatusTransition.
SageMaker provides primary statuses and secondary statuses that apply to each of\n them:
\n\n Starting
\n - Starting the training job.
\n Downloading
- An optional stage for algorithms that\n support File
training input mode. It indicates that\n data is being downloaded to the ML storage volumes.
\n Training
- Training is in progress.
\n Uploading
- Training is complete and the model\n artifacts are being uploaded to the S3 location.
\n Completed
- The training job has completed.
\n Failed
- The training job has failed. The reason for\n the failure is returned in the FailureReason
field of\n DescribeTrainingJobResponse
.
\n MaxRuntimeExceeded
- The job stopped because it\n exceeded the maximum allowed runtime.
\n Stopped
- The training job has stopped.
\n Stopping
- Stopping the training job.
Valid values for SecondaryStatus
are subject to change.
We no longer support the following secondary statuses:
\n\n LaunchingMLInstances
\n
\n PreparingTrainingStack
\n
\n DownloadingTrainingImage
\n
Provides detailed information about the state of the training job. For detailed\n information about the secondary status of the training job, see\n StatusMessage
under SecondaryStatusTransition.
SageMaker provides primary statuses and secondary statuses that apply to each of\n them:
\n\n Starting
\n - Starting the training job.
\n Downloading
- An optional stage for algorithms that\n support File
training input mode. It indicates that\n data is being downloaded to the ML storage volumes.
\n Training
- Training is in progress.
\n Uploading
- Training is complete and the model\n artifacts are being uploaded to the S3 location.
\n Completed
- The training job has completed.
\n Failed
- The training job has failed. The reason for\n the failure is returned in the FailureReason
field of\n DescribeTrainingJobResponse
.
\n MaxRuntimeExceeded
- The job stopped because it\n exceeded the maximum allowed runtime.
\n Stopped
- The training job has stopped.
\n Stopping
- Stopping the training job.
Valid values for SecondaryStatus
are subject to change.
We no longer support the following secondary statuses:
\n\n LaunchingMLInstances
\n
\n PreparingTrainingStack
\n
\n DownloadingTrainingImage
\n
Specifies a limit to how long a model training job can run. It also specifies how long\n a managed Spot training job has to complete. When the job reaches the time limit, SageMaker\n ends the training job. Use this API to cap model training costs.
\nTo stop a job, SageMaker sends the algorithm the SIGTERM
signal, which delays\n job termination for 120 seconds. Algorithms can use this 120-second window to save the\n model artifacts, so the results of training are not lost.
Specifies a limit to how long a model training job can run. It also specifies how long\n a managed Spot training job has to complete. When the job reaches the time limit, SageMaker\n ends the training job. Use this API to cap model training costs.
\nTo stop a job, SageMaker sends the algorithm the SIGTERM
signal, which delays\n job termination for 120 seconds. Algorithms can use this 120-second window to save the\n model artifacts, so the results of training are not lost.
Specifies a limit to how long a model training job can run. It also specifies how long\n a managed Spot training job has to complete. When the job reaches the time limit, SageMaker\n ends the training job. Use this API to cap model training costs.
\nTo stop a job, SageMaker sends the algorithm the SIGTERM signal, which delays job\n termination for 120 seconds. Algorithms can use this 120-second window to save the model\n artifacts.
", + "smithy.api#documentation": "Specifies a limit to how long a model training job can run. It also specifies how long\n a managed Spot training job has to complete. When the job reaches the time limit, SageMaker\n ends the training job. Use this API to cap model training costs.
\nTo stop a job, SageMaker sends the algorithm the SIGTERM signal, which delays job\n termination for 120 seconds. Algorithms can use this 120-second window to save the model\n artifacts.
", "smithy.api#required": {} } } @@ -52167,7 +51747,7 @@ "SplitType": { "target": "com.amazonaws.sagemaker#SplitType", "traits": { - "smithy.api#documentation": "The method to use to split the transform job's data files into smaller batches.\n Splitting is necessary when the total size of each object is too large to fit in a\n single request. You can also use data splitting to improve performance by processing\n multiple concurrent mini-batches. The default value for SplitType
is\n None
, which indicates that input data files are not split, and request\n payloads contain the entire contents of an input object. Set the value of this parameter\n to Line
to split records on a newline character boundary.\n SplitType
also supports a number of record-oriented binary data\n formats. Currently, the supported record formats are:
RecordIO
\nTFRecord
\nWhen splitting is enabled, the size of a mini-batch depends on the values of the\n BatchStrategy
and MaxPayloadInMB
parameters. When the\n value of BatchStrategy
is MultiRecord
, Amazon SageMaker sends the maximum\n number of records in each request, up to the MaxPayloadInMB
limit. If the\n value of BatchStrategy
is SingleRecord
, Amazon SageMaker sends individual\n records in each request.
Some data formats represent a record as a binary payload wrapped with extra\n padding bytes. When splitting is applied to a binary data format, padding is removed\n if the value of BatchStrategy
is set to SingleRecord
.\n Padding is not removed if the value of BatchStrategy
is set to\n MultiRecord
.
For more information about RecordIO
, see Create a Dataset Using\n RecordIO in the MXNet documentation. For more information about\n TFRecord
, see Consuming TFRecord data in the TensorFlow documentation.
The method to use to split the transform job's data files into smaller batches.\n Splitting is necessary when the total size of each object is too large to fit in a\n single request. You can also use data splitting to improve performance by processing\n multiple concurrent mini-batches. The default value for SplitType
is\n None
, which indicates that input data files are not split, and request\n payloads contain the entire contents of an input object. Set the value of this parameter\n to Line
to split records on a newline character boundary.\n SplitType
also supports a number of record-oriented binary data\n formats. Currently, the supported record formats are:
RecordIO
\nTFRecord
\nWhen splitting is enabled, the size of a mini-batch depends on the values of the\n BatchStrategy
and MaxPayloadInMB
parameters. When the\n value of BatchStrategy
is MultiRecord
, Amazon SageMaker sends the maximum\n number of records in each request, up to the MaxPayloadInMB
limit. If the\n value of BatchStrategy
is SingleRecord
, Amazon SageMaker sends individual\n records in each request.
Some data formats represent a record as a binary payload wrapped with extra\n padding bytes. When splitting is applied to a binary data format, padding is removed\n if the value of BatchStrategy
is set to SingleRecord
.\n Padding is not removed if the value of BatchStrategy
is set to\n MultiRecord
.
For more information about RecordIO
, see Create a Dataset Using\n RecordIO in the MXNet documentation. For more information about\n TFRecord
, see Consuming TFRecord data in the TensorFlow documentation.
A string that determines the number of records included in a single mini-batch.
\n\n SingleRecord
means only one record is used per mini-batch.\n MultiRecord
means a mini-batch is set to contain as many records that\n can fit within the MaxPayloadInMB
limit.
A string that determines the number of records included in a single mini-batch.
\n\n SingleRecord
means only one record is used per mini-batch.\n MultiRecord
means a mini-batch is set to contain as many records that\n can fit within the MaxPayloadInMB
limit.
The Amazon S3 path where you want Amazon SageMaker to store the results of the transform job. For\n example, s3://bucket-name/key-name-prefix
.
For every S3 object used as input for the transform job, batch transform stores the\n transformed data with an .out
suffix in a corresponding subfolder in the\n location in the output prefix. For example, for the input data stored at\n s3://bucket-name/input-name-prefix/dataset01/data.csv
, batch transform\n stores the transformed data at\n s3://bucket-name/output-name-prefix/input-name-prefix/data.csv.out
.\n Batch transform doesn't upload partially processed objects. For an input S3 object that\n contains multiple records, it creates an .out
file only if the transform\n job succeeds on the entire file. When the input contains multiple S3 objects, the batch\n transform job processes the listed S3 objects and uploads only the output for\n successfully processed objects. If any object fails in the transform job batch transform\n marks the job as failed to prompt investigation.
The Amazon S3 path where you want Amazon SageMaker to store the results of the transform job. For\n example, s3://bucket-name/key-name-prefix
.
For every S3 object used as input for the transform job, batch transform stores the\n transformed data with an .out
suffix in a corresponding subfolder in the\n location in the output prefix. For example, for the input data stored at\n s3://bucket-name/input-name-prefix/dataset01/data.csv
, batch transform\n stores the transformed data at\n s3://bucket-name/output-name-prefix/input-name-prefix/data.csv.out
.\n Batch transform doesn't upload partially processed objects. For an input S3 object that\n contains multiple records, it creates an .out
file only if the transform\n job succeeds on the entire file. When the input contains multiple S3 objects, the batch\n transform job processes the listed S3 objects and uploads only the output for\n successfully processed objects. If any object fails in the transform job batch transform\n marks the job as failed to prompt investigation.
The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using\n Amazon S3 server-side encryption. The KmsKeyId
can be any of the following\n formats:
Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN:\n arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
Alias name: alias/ExampleAlias
\n
Alias name ARN:\n arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias
\n
If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your\n role's account. For more information, see KMS-Managed Encryption Keys in the\n Amazon Simple Storage Service\n Developer Guide.\n
\nThe KMS key policy must grant permission to the IAM role that you specify in your\n\tCreateModel\n\t\trequest. For more information, see Using\n Key Policies in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer\n Guide.
" + "smithy.api#documentation": "The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using\n Amazon S3 server-side encryption. The KmsKeyId
can be any of the following\n formats:
Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN:\n arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
Alias name: alias/ExampleAlias
\n
Alias name ARN:\n arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias
\n
If you don't provide a KMS key ID, Amazon SageMaker uses the default KMS key for Amazon S3 for your\n role's account. For more information, see KMS-Managed Encryption Keys in the\n Amazon Simple Storage Service\n Developer Guide.\n
\nThe KMS key policy must grant permission to the IAM role that you specify in your\n\tCreateModel\n\t\trequest. For more information, see Using\n Key Policies in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer\n Guide.
" } } }, @@ -52742,7 +52322,7 @@ "VolumeKmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt model data on the storage volume\n attached to the ML compute instance(s) that run the batch transform job.
\nCertain Nitro-based instances include local storage, dependent on the instance\n type. Local storage volumes are encrypted using a hardware module on the instance.\n You can't request a VolumeKmsKeyId
when using an instance type with\n local storage.
For a list of instance types that support local instance storage, see Instance Store Volumes.
\nFor more information about local instance storage encryption, see SSD\n Instance Store Volumes.
\n\n The VolumeKmsKeyId
can be any of the following formats:
Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN:\n arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
Alias name: alias/ExampleAlias
\n
Alias name ARN:\n arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias
\n
The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt model data on the storage volume\n attached to the ML compute instance(s) that run the batch transform job.
\nCertain Nitro-based instances include local storage, dependent on the instance\n type. Local storage volumes are encrypted using a hardware module on the instance.\n You can't request a VolumeKmsKeyId
when using an instance type with\n local storage.
For a list of instance types that support local instance storage, see Instance Store Volumes.
\nFor more information about local instance storage encryption, see SSD\n Instance Store Volumes.
\n\n The VolumeKmsKeyId
can be any of the following formats:
Key ID: 1234abcd-12ab-34cd-56ef-1234567890ab
\n
Key ARN:\n arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
\n
Alias name: alias/ExampleAlias
\n
Alias name ARN:\n arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias
\n
If you choose S3Prefix
, S3Uri
identifies a key name prefix.\n Amazon SageMaker uses all objects with the specified key name prefix for batch transform.
If you choose ManifestFile
, S3Uri
identifies an object that\n is a manifest file containing a list of object keys that you want Amazon SageMaker to use for batch\n transform.
The following values are compatible: ManifestFile
,\n S3Prefix
\n
The following value is not compatible: AugmentedManifestFile
\n
If you choose S3Prefix
, S3Uri
identifies a key name prefix.\n Amazon SageMaker uses all objects with the specified key name prefix for batch transform.
If you choose ManifestFile
, S3Uri
identifies an object that\n is a manifest file containing a list of object keys that you want Amazon SageMaker to use for batch\n transform.
The following values are compatible: ManifestFile
,\n S3Prefix
\n
The following value is not compatible: AugmentedManifestFile
\n
Depending on the value specified for the S3DataType
, identifies either a\n key name prefix or a manifest. For example:
A key name prefix might look like this:\n s3://bucketname/exampleprefix
.
A manifest might look like this:\n s3://bucketname/example.manifest
\n
The manifest is an S3 object which is a JSON file with the following format:
\n\n [ {\"prefix\": \"s3://customer_bucket/some/prefix/\"},
\n
\n \"relative/path/to/custdata-1\",
\n
\n \"relative/path/custdata-2\",
\n
\n ...
\n
\n \"relative/path/custdata-N\"
\n
\n ]
\n
The preceding JSON matches the following S3Uris
:
\n s3://customer_bucket/some/prefix/relative/path/to/custdata-1
\n
\n s3://customer_bucket/some/prefix/relative/path/custdata-2
\n
\n ...
\n
\n s3://customer_bucket/some/prefix/relative/path/custdata-N
\n
The complete set of S3Uris
in this manifest constitutes the\n input data for the channel for this datasource. The object that each\n S3Uris
points to must be readable by the IAM role that Amazon SageMaker\n uses to perform tasks on your behalf.
Depending on the value specified for the S3DataType
, identifies either a\n key name prefix or a manifest. For example:
A key name prefix might look like this:\n s3://bucketname/exampleprefix
.
A manifest might look like this:\n s3://bucketname/example.manifest
\n
The manifest is an S3 object which is a JSON file with the following format:
\n\n [ {\"prefix\": \"s3://customer_bucket/some/prefix/\"},
\n
\n \"relative/path/to/custdata-1\",
\n
\n \"relative/path/custdata-2\",
\n
\n ...
\n
\n \"relative/path/custdata-N\"
\n
\n ]
\n
The preceding JSON matches the following S3Uris
:
\n s3://customer_bucket/some/prefix/relative/path/to/custdata-1
\n
\n s3://customer_bucket/some/prefix/relative/path/custdata-2
\n
\n ...
\n
\n s3://customer_bucket/some/prefix/relative/path/custdata-N
\n
The complete set of S3Uris
in this manifest constitutes the\n input data for the channel for this datasource. The object that each\n S3Uris
points to must be readable by the IAM role that Amazon SageMaker\n uses to perform tasks on your behalf.
The ARN of the worker task template used to render the worker UI and tools for\n labeling job tasks.
\nUse this parameter when you are creating a labeling job for named entity recognition,\n 3D point cloud and video frame labeling jobs. Use your labeling job task type to select\n one of the following ARNs and use it with this parameter when you create a labeling job.\n Replace aws-region
with the Amazon Web Services Region you are creating your labeling job\n in. For example, replace aws-region
with us-west-1
if you\n create a labeling job in US West (N. California).
\n Named Entity Recognition\n
\nUse the following HumanTaskUiArn
for named entity recognition labeling\n jobs:
\n arn:aws:sagemaker:aws-region:394669845002:human-task-ui/NamedEntityRecognition
\n
\n 3D Point Cloud HumanTaskUiArns\n
\nUse this HumanTaskUiArn
for 3D point cloud object detection and 3D point\n cloud object detection adjustment labeling jobs.
\n arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudObjectDetection
\n
Use this HumanTaskUiArn
for 3D point cloud object tracking and 3D point\n cloud object tracking adjustment labeling jobs.
\n arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudObjectTracking
\n
Use this HumanTaskUiArn
for 3D point cloud semantic segmentation and 3D\n point cloud semantic segmentation adjustment labeling jobs.
\n arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudSemanticSegmentation
\n
\n Video Frame HumanTaskUiArns\n
\nUse this HumanTaskUiArn
for video frame object detection and video frame\n object detection adjustment labeling jobs.
\n arn:aws:sagemaker:region:394669845002:human-task-ui/VideoObjectDetection
\n
Use this HumanTaskUiArn
for video frame object tracking and video frame\n object tracking adjustment labeling jobs.
\n arn:aws:sagemaker:aws-region:394669845002:human-task-ui/VideoObjectTracking
\n
The ARN of the worker task template used to render the worker UI and tools for\n labeling job tasks.
\nUse this parameter when you are creating a labeling job for named entity recognition,\n 3D point cloud and video frame labeling jobs. Use your labeling job task type to select\n one of the following ARNs and use it with this parameter when you create a labeling job.\n Replace aws-region
with the Amazon Web Services Region you are creating your labeling job\n in. For example, replace aws-region
with us-west-1
if you\n create a labeling job in US West (N. California).
\n Named Entity Recognition\n
\nUse the following HumanTaskUiArn
for named entity recognition labeling\n jobs:
\n arn:aws:sagemaker:aws-region:394669845002:human-task-ui/NamedEntityRecognition
\n
\n 3D Point Cloud HumanTaskUiArns\n
\n \nUse this HumanTaskUiArn
for 3D point cloud object detection and 3D point\n cloud object detection adjustment labeling jobs.
\n arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudObjectDetection
\n
Use this HumanTaskUiArn
for 3D point cloud object tracking and 3D point\n cloud object tracking adjustment labeling jobs.
\n arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudObjectTracking
\n
Use this HumanTaskUiArn
for 3D point cloud semantic segmentation and 3D\n point cloud semantic segmentation adjustment labeling jobs.
\n arn:aws:sagemaker:aws-region:394669845002:human-task-ui/PointCloudSemanticSegmentation
\n
\n Video Frame HumanTaskUiArns\n
\n \nUse this HumanTaskUiArn
for video frame object detection and video frame\n object detection adjustment labeling jobs.
\n arn:aws:sagemaker:region:394669845002:human-task-ui/VideoObjectDetection
\n
Use this HumanTaskUiArn
for video frame object tracking and video frame\n object tracking adjustment labeling jobs.
\n arn:aws:sagemaker:aws-region:394669845002:human-task-ui/VideoObjectTracking
\n
Provided configuration information for the worker UI for a labeling job. Provide\n either HumanTaskUiArn
or UiTemplateS3Uri
.
For named entity recognition, 3D point cloud and video frame labeling jobs, use\n HumanTaskUiArn
.
For all other Ground Truth built-in task types and custom task types, use\n UiTemplateS3Uri
to specify the location of a worker task template in\n Amazon S3.
Provided configuration information for the worker UI for a labeling job. Provide\n either HumanTaskUiArn
or UiTemplateS3Uri
.
For named entity recognition, 3D point cloud and video frame labeling jobs, use\n HumanTaskUiArn
.
For all other Ground Truth built-in task types and custom task types, use\n UiTemplateS3Uri
to specify the location of a worker task template in\n Amazon S3.
The configuration of the git repository, including the URL and the Amazon Resource\n Name (ARN) of the Amazon Web Services Secrets Manager secret that contains the\n credentials used to access the repository. The secret must have a staging label of\n AWSCURRENT
and must be in the following format:
\n {\"username\": UserName, \"password\":\n Password}
\n
The configuration of the git repository, including the URL and the Amazon Resource\n Name (ARN) of the Amazon Web Services Secrets Manager secret that contains the\n credentials used to access the repository. The secret must have a staging label of\n AWSCURRENT
and must be in the following format:
\n {\"username\": UserName, \"password\":\n Password}
\n
The default settings used to create a space within the Domain.
" } - }, - "AppSecurityGroupManagement": { - "target": "com.amazonaws.sagemaker#AppSecurityGroupManagement", - "traits": { - "smithy.api#documentation": "The entity that creates and manages the required security groups for inter-app\n communication in VPCOnly
mode. Required when\n CreateDomain.AppNetworkAccessType
is VPCOnly
and\n DomainSettings.RStudioServerProDomainSettings.DomainExecutionRoleArn
is\n provided.
Deploys the new EndpointConfig
specified in the request, switches to\n using newly created endpoint, and then deletes resources provisioned for the endpoint\n using the previous EndpointConfig
(there is no availability loss).
When SageMaker receives the request, it sets the endpoint status to\n Updating
. After updating the endpoint, it sets the status to\n InService
. To check the status of an endpoint, use the DescribeEndpoint API.\n \n
You must not delete an EndpointConfig
in use by an endpoint that is\n live or while the UpdateEndpoint
or CreateEndpoint
\n operations are being performed on the endpoint. To update an endpoint, you must\n create a new EndpointConfig
.
If you delete the EndpointConfig
of an endpoint that is active or\n being created or updated you may lose visibility into the instance type the endpoint\n is using. The endpoint must be deleted in order to stop incurring charges.
Deploys the new EndpointConfig
specified in the request, switches to\n using newly created endpoint, and then deletes resources provisioned for the endpoint\n using the previous EndpointConfig
(there is no availability loss).
When SageMaker receives the request, it sets the endpoint status to\n Updating
. After updating the endpoint, it sets the status to\n InService
. To check the status of an endpoint, use the DescribeEndpoint API.\n \n
You must not delete an EndpointConfig
in use by an endpoint that is\n live or while the UpdateEndpoint
or CreateEndpoint
\n operations are being performed on the endpoint. To update an endpoint, you must\n create a new EndpointConfig
.
If you delete the EndpointConfig
of an endpoint that is active or\n being created or updated you may lose visibility into the instance type the endpoint\n is using. The endpoint must be deleted in order to stop incurring charges.
The new ARN for the IAM role that enables Amazon SageMaker to perform tasks on your behalf.
" + "smithy.api#documentation": "The new Amazon Resource Name (ARN) for the IAM role that enables Amazon SageMaker to perform tasks on your behalf.
" } } } @@ -54428,117 +54002,7 @@ "ImageArn": { "target": "com.amazonaws.sagemaker#ImageArn", "traits": { - "smithy.api#documentation": "The ARN of the image.
" - } - } - } - }, - "com.amazonaws.sagemaker#UpdateImageVersion": { - "type": "operation", - "input": { - "target": "com.amazonaws.sagemaker#UpdateImageVersionRequest" - }, - "output": { - "target": "com.amazonaws.sagemaker#UpdateImageVersionResponse" - }, - "errors": [ - { - "target": "com.amazonaws.sagemaker#ResourceInUse" - }, - { - "target": "com.amazonaws.sagemaker#ResourceNotFound" - } - ], - "traits": { - "smithy.api#documentation": "Updates the properties of a SageMaker image version.
" - } - }, - "com.amazonaws.sagemaker#UpdateImageVersionRequest": { - "type": "structure", - "members": { - "ImageName": { - "target": "com.amazonaws.sagemaker#ImageName", - "traits": { - "smithy.api#documentation": "The name of the image.
", - "smithy.api#required": {} - } - }, - "Alias": { - "target": "com.amazonaws.sagemaker#SageMakerImageVersionAlias", - "traits": { - "smithy.api#documentation": "The alias of the image version.
" - } - }, - "Version": { - "target": "com.amazonaws.sagemaker#ImageVersionNumber", - "traits": { - "smithy.api#documentation": "The version of the image.
" - } - }, - "AliasesToAdd": { - "target": "com.amazonaws.sagemaker#SageMakerImageVersionAliases", - "traits": { - "smithy.api#documentation": "A list of aliases to add.
" - } - }, - "AliasesToDelete": { - "target": "com.amazonaws.sagemaker#SageMakerImageVersionAliases", - "traits": { - "smithy.api#documentation": "A list of aliases to delete.
" - } - }, - "VendorGuidance": { - "target": "com.amazonaws.sagemaker#VendorGuidance", - "traits": { - "smithy.api#documentation": "The availability of the image version specified by the maintainer.
\n\n NOT_PROVIDED
: The maintainers did not provide a status for image version stability.
\n STABLE
: The image version is stable.
\n TO_BE_ARCHIVED
: The image version is set to be archived. Custom image versions that are set to be archived are automatically archived after three months.
\n ARCHIVED
: The image version is archived. Archived image versions are not searchable and are no longer actively supported.
Indicates SageMaker job type compatibility.
\n\n TRAINING
: The image version is compatible with SageMaker training jobs.
\n INFERENCE
: The image version is compatible with SageMaker inference jobs.
\n NOTEBOOK_KERNEL
: The image version is compatible with SageMaker notebook kernels.
The machine learning framework vended in the image version.
" - } - }, - "ProgrammingLang": { - "target": "com.amazonaws.sagemaker#ProgrammingLang", - "traits": { - "smithy.api#documentation": "The supported programming language and its version.
" - } - }, - "Processor": { - "target": "com.amazonaws.sagemaker#Processor", - "traits": { - "smithy.api#documentation": "Indicates CPU or GPU compatibility.
\n\n CPU
: The image version is compatible with CPU.
\n GPU
: The image version is compatible with GPU.
Indicates Horovod compatibility.
" - } - }, - "ReleaseNotes": { - "target": "com.amazonaws.sagemaker#ReleaseNotes", - "traits": { - "smithy.api#documentation": "The maintainer description of the image version.
" - } - } - } - }, - "com.amazonaws.sagemaker#UpdateImageVersionResponse": { - "type": "structure", - "members": { - "ImageVersionArn": { - "target": "com.amazonaws.sagemaker#ImageVersionArn", - "traits": { - "smithy.api#documentation": "The ARN of the image version.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the image.
" } } } @@ -54588,19 +54052,19 @@ "ModelVariants": { "target": "com.amazonaws.sagemaker#ModelVariantConfigList", "traits": { - "smithy.api#documentation": "\n An array of ModelVariantConfig
objects. There is one for each variant, whose infrastructure\n configuration you want to update.\n
\n Array of ModelVariantConfigSummary
objects. There is one for each variant, whose infrastructure\n configuration you want to update.\n
The Amazon S3 location and configuration for storing inference request and response data.
" + "smithy.api#documentation": "The Amazon S3 storage configuration for the inference experiment.
" } }, "ShadowModeConfig": { "target": "com.amazonaws.sagemaker#ShadowModeConfig", "traits": { - "smithy.api#documentation": "\n The configuration of ShadowMode
inference experiment type. Use this field to specify a\n production variant which takes all the inference requests, and a shadow variant to which Amazon SageMaker replicates a\n percentage of the inference requests. For the shadow variant also specify the percentage of requests that\n Amazon SageMaker replicates.\n
The Amazon S3 storage configuration for the inference experiment.
" } } } @@ -54900,7 +54364,7 @@ "RoleArn": { "target": "com.amazonaws.sagemaker#RoleArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access the\n notebook instance. For more information, see SageMaker Roles.
\nTo be able to pass this role to SageMaker, the caller of this API must have the\n iam:PassRole
permission.
The Amazon Resource Name (ARN) of the IAM role that SageMaker can assume to access the\n notebook instance. For more information, see SageMaker Roles.
\nTo be able to pass this role to SageMaker, the caller of this API must have the\n iam:PassRole
permission.
Whether root access is enabled or disabled for users of the notebook instance. The\n default value is Enabled
.
If you set this to Disabled
, users don't have root access on the\n notebook instance, but lifecycle configuration scripts still run with root\n permissions.
Whether root access is enabled or disabled for users of the notebook instance. The\n default value is Enabled
.
If you set this to Disabled
, users don't have root access on the\n notebook instance, but lifecycle configuration scripts still run with root\n permissions.
Updates a machine learning (ML) project that is created from a template that \n sets up an ML pipeline from training to deploying an approved model.
\nYou must not update a project that is in use. If you update the\n ServiceCatalogProvisioningUpdateDetails
of a project that is active\n or being created, or updated, you may lose resources already created by the\n project.
Updates a machine learning (ML) project that is created from a template that \n sets up an ML pipeline from training to deploying an approved model.
\nYou must not update a project that is in use. If you update the\n ServiceCatalogProvisioningUpdateDetails
of a project that is active\n or being created, or updated, you may lose resources already created by the\n project.
Use this operation to update your workforce. You can use this operation to \n require that workers use specific IP addresses to work on tasks\n and to update your OpenID Connect (OIDC) Identity Provider (IdP) workforce configuration.
\nThe worker portal is now supported in VPC and public internet.
\n Use SourceIpConfig
to restrict worker access to tasks to a specific range of IP addresses. \n You specify allowed IP addresses by creating a list of up to ten CIDRs.\n By default, a workforce isn't restricted to specific IP addresses. If you specify a\n range of IP addresses, workers who attempt to access tasks using any IP address outside\n the specified range are denied and get a Not Found
error message on\n the worker portal.
To restrict access to all the workers in public internet, add the SourceIpConfig
CIDR value as \"0.0.0.0/0\".
Amazon SageMaker does not support Source Ip restriction for worker portals in VPC.
\nUse OidcConfig
to update the configuration of a workforce created using\n your own OIDC IdP.
You can only update your OIDC IdP configuration when there are no work teams\n associated with your workforce. You can delete work teams using the operation.
\nAfter restricting access to a range of IP addresses or updating your OIDC IdP configuration with this operation, you\n can view details about your update workforce using the \n operation.
\nThis operation only applies to private workforces.
\nUse this operation to update your workforce. You can use this operation to \n require that workers use specific IP addresses to work on tasks\n and to update your OpenID Connect (OIDC) Identity Provider (IdP) workforce configuration.
\nThe worker portal is now supported in VPC and public internet.
\n\n \n Use SourceIpConfig
to restrict worker access to tasks to a specific range of IP addresses. \n You specify allowed IP addresses by creating a list of up to ten CIDRs.\n By default, a workforce isn't restricted to specific IP addresses. If you specify a\n range of IP addresses, workers who attempt to access tasks using any IP address outside\n the specified range are denied and get a Not Found
error message on\n the worker portal.
To restrict access to all the workers in public internet, add the SourceIpConfig
CIDR value as \"0.0.0.0/0\".
Amazon SageMaker does not support Source Ip restriction for worker portals in VPC.
\nUse OidcConfig
to update the configuration of a workforce created using\n your own OIDC IdP.
You can only update your OIDC IdP configuration when there are no work teams\n associated with your workforce. You can delete work teams using the operation.
\nAfter restricting access to a range of IP addresses or updating your OIDC IdP configuration with this operation, you\n can view details about your update workforce using the \n operation.
\nThis operation only applies to private workforces.
\nA list of one to ten worker IP address ranges (CIDRs) that can be used to\n access tasks assigned to this workforce.
\nMaximum: Ten CIDR values
" + "smithy.api#documentation": "A list of one to ten worker IP address ranges (CIDRs) that can be used to\n access tasks assigned to this workforce.
\nMaximum: Ten CIDR values
" } }, "OidcConfig": { @@ -55630,7 +55094,7 @@ "MemberDefinitions": { "target": "com.amazonaws.sagemaker#MemberDefinitions", "traits": { - "smithy.api#documentation": "A list of MemberDefinition
objects that contains objects that identify\n the workers that make up the work team.
Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). \n For private workforces created using Amazon Cognito use\n CognitoMemberDefinition
. For workforces created using your own OIDC identity\n provider (IdP) use OidcMemberDefinition
. You should not provide input\n for both of these parameters in a single request.
For workforces created using Amazon Cognito, private work teams correspond to Amazon Cognito\n user groups within the user pool used to create a workforce. All of the\n CognitoMemberDefinition
objects that make up the member definition must\n have the same ClientId
and UserPool
values. To add a Amazon\n Cognito user group to an existing worker pool, see Adding groups to a User\n Pool. For more information about user pools, see Amazon Cognito User\n Pools.
For workforces created using your own OIDC IdP, specify the user groups that you want\n to include in your private work team in OidcMemberDefinition
by listing\n those groups in Groups
. Be aware that user groups that are already in the\n work team must also be listed in Groups
when you make this request to\n remain on the work team. If you do not include these user groups, they will no longer be\n associated with the work team you update.
A list of MemberDefinition
objects that contains objects that identify\n the workers that make up the work team.
Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). \n For private workforces created using Amazon Cognito use\n CognitoMemberDefinition
. For workforces created using your own OIDC identity\n provider (IdP) use OidcMemberDefinition
. You should not provide input\n for both of these parameters in a single request.
For workforces created using Amazon Cognito, private work teams correspond to Amazon Cognito\n user groups within the user pool used to create a workforce. All of the\n CognitoMemberDefinition
objects that make up the member definition must\n have the same ClientId
and UserPool
values. To add a Amazon\n Cognito user group to an existing worker pool, see Adding groups to a User\n Pool. For more information about user pools, see Amazon Cognito User\n Pools.
For workforces created using your own OIDC IdP, specify the user groups that you want\n to include in your private work team in OidcMemberDefinition
by listing\n those groups in Groups
. Be aware that user groups that are already in the\n work team must also be listed in Groups
when you make this request to\n remain on the work team. If you do not include these user groups, they will no longer be\n associated with the work team you update.
The type of variant property. The supported values are:
\n\n DesiredInstanceCount
: Overrides the existing variant instance\n counts using the ProductionVariant$InitialInstanceCount values\n in the CreateEndpointConfigInput$ProductionVariants.
\n DesiredWeight
: Overrides the existing variant weights using the\n ProductionVariant$InitialVariantWeight values in the CreateEndpointConfigInput$ProductionVariants.
\n DataCaptureConfig
: (Not currently supported.)
The type of variant property. The supported values are:
\n\n DesiredInstanceCount
: Overrides the existing variant instance\n counts using the ProductionVariant$InitialInstanceCount values\n in the CreateEndpointConfigInput$ProductionVariants.
\n DesiredWeight
: Overrides the existing variant weights using the\n ProductionVariant$InitialVariantWeight values in the CreateEndpointConfigInput$ProductionVariants.
\n DataCaptureConfig
: (Not currently supported.)
Specifies a production variant property type for an Endpoint.
\nIf you are updating an endpoint with the UpdateEndpointInput$RetainAllVariantProperties option set to\n true
, the VariantProperty
objects listed in UpdateEndpointInput$ExcludeRetainedVariantProperties override the\n existing variant properties of the endpoint.
Specifies a production variant property type for an Endpoint.
\nIf you are updating an endpoint with the UpdateEndpointInput$RetainAllVariantProperties option set to\n true
, the VariantProperty
objects listed in UpdateEndpointInput$ExcludeRetainedVariantProperties override the\n existing variant properties of the endpoint.
The status of the warm pool.
\n\n InUse
: The warm pool is in use for the training job.
\n Available
: The warm pool is available to reuse for a matching\n training job.
\n Reused
: The warm pool moved to a matching training job for\n reuse.
\n Terminated
: The warm pool is no longer available. Warm pools are\n unavailable if they are terminated by a user, terminated for a patch update, or\n terminated for exceeding the specified\n KeepAlivePeriodInSeconds
.
The status of the warm pool.
\n\n InUse
: The warm pool is in use for the training job.
\n Available
: The warm pool is available to reuse for a matching\n training job.
\n Reused
: The warm pool moved to a matching training job for\n reuse.
\n Terminated
: The warm pool is no longer available. Warm pools are\n unavailable if they are terminated by a user, terminated for a patch update, or\n terminated for exceeding the specified\n KeepAlivePeriodInSeconds
.
The billable time in seconds used by the warm pool. Billable time refers to the\n absolute wall-clock time.
\nMultiply ResourceRetainedBillableTimeInSeconds
by the number of instances\n (InstanceCount
) in your training cluster to get the total compute time\n SageMaker bills you if you run warm pool training. The formula is as follows:\n ResourceRetainedBillableTimeInSeconds * InstanceCount
.
The billable time in seconds used by the warm pool. Billable time refers to the\n absolute wall-clock time.
\nMultiply ResourceRetainedBillableTimeInSeconds
by the number of instances\n (InstanceCount
) in your training cluster to get the total compute time\n SageMaker bills you if you run warm pool training. The formula is as follows:\n ResourceRetainedBillableTimeInSeconds * InstanceCount
.
A list of MemberDefinition
objects that contains objects that identify\n the workers that make up the work team.
Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). \n For private workforces created using Amazon Cognito use\n CognitoMemberDefinition
. For workforces created using your own OIDC identity\n provider (IdP) use OidcMemberDefinition
.
A list of MemberDefinition
objects that contains objects that identify\n the workers that make up the work team.
Workforces can be created using Amazon Cognito or your own OIDC Identity Provider (IdP). \n For private workforces created using Amazon Cognito use\n CognitoMemberDefinition
. For workforces created using your own OIDC identity\n provider (IdP) use OidcMemberDefinition
.
How to allocate instances across Spot Instance pools. Valid values are as follows:
\n\n lowest-price
\n
\n capacity-optimized
\n
\n capacity-optimized-prioritized
\n
How to allocate instances across Spot Instance pools. Valid values are as follows:
\n\t\n lowest-price
\n
\n capacity-optimized
\n
\n capacity-optimized-prioritized
\n
The volume type. Valid values are as follows:
\n\n gp2
\n
\n gp3
\n
\n io1
\n
\n sc1
\n
\n st1
\n
\n standard
\n
The volume type. Valid values are as follows:
\n \t\n gp2
\n
\n gp3
\n
\n io1
\n
\n sc1
\n
\n st1
\n
\n standard
\n
The current status of the table. Valid values are as follows:
\n\n ACTIVE
\n
\n ARCHIVED
\n
\n ARCHIVING
\n
\n CREATING
\n
\n DELETING
\n
\n INACCESSIBLE_ENCRYPTION_CREDENTIALS
\n
\n UPDATING
\n
The current status of the table. Valid values are as follows:
\n\t\t\n ACTIVE
\n
\n ARCHIVED
\n
\n ARCHIVING
\n
\n CREATING
\n
\n DELETING
\n
\n INACCESSIBLE_ENCRYPTION_CREDENTIALS
\n
\n UPDATING
\n
The current status of the index.
\n\n ACTIVE
\n
\n CREATING
\n
\n DELETING
\n
\n UPDATING
\n
The current status of the index.
\n \t\n ACTIVE
\n
\n CREATING
\n
\n DELETING
\n
\n UPDATING
\n
The types of attributes that are projected into the index. Valid values are as follows:
\n\n ALL
\n
\n INCLUDE
\n
\n KEYS_ONLY
\n
The types of attributes that are projected into the index. Valid values are as follows:
\n \t\n ALL
\n
\n INCLUDE
\n
\n KEYS_ONLY
\n
The current status of the replica. Valid values are as follows:
\n\n ACTIVE
\n
\n CREATING
\n
\n CREATION_FAILED
\n
\n DELETING
\n
\n UPDATING
\n
The current status of the replica. Valid values are as follows:
\n \t\n ACTIVE
\n
\n CREATING
\n
\n CREATION_FAILED
\n
\n DELETING
\n
\n UPDATING
\n
\n The device name.\n
" - } - }, - "Ebs": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataBlockDeviceMappingSetEbsDetails", - "traits": { - "smithy.api#documentation": "\n Parameters used to automatically set up Amazon EBS volumes when the instance is \n launched.\n
" + "smithy.api#documentation": "The identifier of the association between the network ACL and the subnet.
" } }, - "NoDevice": { + "NetworkAclId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n Omits the device from the block device mapping when an empty string is specified.\n
" + "smithy.api#documentation": "The identifier of the network ACL.
" } }, - "VirtualName": { + "SubnetId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The virtual device name (ephemeralN). Instance store volumes are numbered starting \n from 0. An instance type with 2 available instance store volumes can specify mappings \n for ephemeral0
and ephemeral1
. The number of available instance store volumes depends \n on the instance type.\n
The identifier of the subnet that is associated with the network ACL.
" } } }, "traits": { - "smithy.api#documentation": "\n Information about a block device mapping for an Amazon Elastic Compute Cloud (Amazon EC2) launch template.\n
" + "smithy.api#documentation": "An association between the network ACL and a subnet.
" + } + }, + "com.amazonaws.securityhub#AwsEc2NetworkAclAssociationList": { + "type": "list", + "member": { + "target": "com.amazonaws.securityhub#AwsEc2NetworkAclAssociation" } }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataBlockDeviceMappingSetEbsDetails": { + "com.amazonaws.securityhub#AwsEc2NetworkAclDetails": { "type": "structure", "members": { - "DeleteOnTermination": { - "target": "com.amazonaws.securityhub#Boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "\n Indicates whether the EBS volume is deleted on instance termination.\n
" - } - }, - "Encrypted": { + "IsDefault": { "target": "com.amazonaws.securityhub#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "\n Indicates whether the EBS volume is encrypted. Encrypted volumes can only be \n attached to instances that support Amazon EBS encryption. If you're creating a \n volume from a snapshot, you can't specify an encryption value.\n
" - } - }, - "Iops": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The number of I/O operations per second (IOPS). \n
" + "smithy.api#documentation": "Whether this is the default network ACL for the VPC.
" } }, - "KmsKeyId": { + "NetworkAclId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The Amazon Resource Name (ARN) of the symmetric Key Management Service (KMS) customer managed key \n used for encryption.\n
" + "smithy.api#documentation": "The identifier of the network ACL.
" } }, - "SnapshotId": { + "OwnerId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The ID of the EBS snapshot.\n
" + "smithy.api#documentation": "The identifier of the Amazon Web Services account that owns the network ACL.
" } }, - "Throughput": { - "target": "com.amazonaws.securityhub#Integer", + "VpcId": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The throughput to provision for a gp3 volume, with a maximum of 1,000 MiB/s.\n
" + "smithy.api#documentation": "The identifier of the VPC for the network ACL.
" } }, - "VolumeSize": { - "target": "com.amazonaws.securityhub#Integer", + "Associations": { + "target": "com.amazonaws.securityhub#AwsEc2NetworkAclAssociationList", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The size of the volume, in GiBs. You must specify either a snapshot ID or a volume size. \n
" + "smithy.api#documentation": "Associations between the network ACL and subnets.
" } }, - "VolumeType": { - "target": "com.amazonaws.securityhub#NonEmptyString", + "Entries": { + "target": "com.amazonaws.securityhub#AwsEc2NetworkAclEntryList", "traits": { - "smithy.api#documentation": "\n The volume type.\n
" + "smithy.api#documentation": "The set of rules in the network ACL.
" } } }, "traits": { - "smithy.api#documentation": "\n Parameters for a block device for an Amazon Elastic Block Store (Amazon EBS) volume in an Amazon EC2 launch template.\n
" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataBlockDeviceMappingSetList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataBlockDeviceMappingSetDetails" + "smithy.api#documentation": "Contains details about an Amazon EC2 network access control list (ACL).
" } }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataCapacityReservationSpecificationCapacityReservationTargetDetails": { + "com.amazonaws.securityhub#AwsEc2NetworkAclEntry": { "type": "structure", "members": { - "CapacityReservationId": { + "CidrBlock": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The ID of the Capacity Reservation in which to run the instance.\n
" + "smithy.api#documentation": "The IPV4 network range for which to deny or allow access.
" } }, - "CapacityReservationResourceGroupArn": { - "target": "com.amazonaws.securityhub#NonEmptyString", + "Egress": { + "target": "com.amazonaws.securityhub#Boolean", "traits": { - "smithy.api#documentation": "\n The Amazon Resource Name (ARN) of the Capacity Reservation resource group in which to run the instance.\n
" + "smithy.api#default": false, + "smithy.api#documentation": "Whether the rule is an egress rule. An egress rule is a rule that applies to traffic that leaves the subnet.
" } - } - }, - "traits": { - "smithy.api#documentation": "\n Information about the target Capacity Reservation or Capacity Reservation group in which to run an Amazon EC2 instance.\n
" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataCapacityReservationSpecificationDetails": { - "type": "structure", - "members": { - "CapacityReservationPreference": { + }, + "IcmpTypeCode": { + "target": "com.amazonaws.securityhub#IcmpTypeCode", + "traits": { + "smithy.api#documentation": "The Internet Control Message Protocol (ICMP) type and code for which to deny or allow access.
" + } + }, + "Ipv6CidrBlock": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n Indicates the instance's Capacity Reservation preferences. If equal to open
, the instance can run in any open Capacity Reservation that has matching attributes (instance type, platform, Availability Zone). If equal to none
, the instance avoids running in a Capacity Reservation even if one is available. The instance runs in On-Demand capacity.\n \n
The IPV6 network range for which to deny or allow access.
" } }, - "CapacityReservationTarget": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataCapacityReservationSpecificationCapacityReservationTargetDetails", + "PortRange": { + "target": "com.amazonaws.securityhub#PortRangeFromTo", "traits": { - "smithy.api#documentation": "\n Specifies a target Capacity Reservation.\n
" + "smithy.api#documentation": "For TCP or UDP protocols, the range of ports that the rule applies to.
" } - } - }, - "traits": { - "smithy.api#documentation": "\n Specifies the Capacity Reservation targeting option of an Amazon EC2 instance.\n
" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataCpuOptionsDetails": { - "type": "structure", - "members": { - "CoreCount": { - "target": "com.amazonaws.securityhub#Integer", + }, + "Protocol": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The number of CPU cores for the instance.\n
" + "smithy.api#documentation": "The protocol that the rule applies to. To deny or allow access to all protocols, use the\n value -1
.
Whether the rule is used to allow access or deny access.
" + } + }, + "RuleNumber": { "target": "com.amazonaws.securityhub#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "\n The number of threads per CPU core. A value of 1
disables multithreading for the instance. \n         The default value is 2
.\n
The rule number. The rules are processed in order by their number.
" } } }, "traits": { - "smithy.api#documentation": "\n Specifies the CPU options for an Amazon EC2 instance. For more information, see \n Optimize CPU options in the Amazon Elastic Compute Cloud User Guide.\n
" + "smithy.api#documentation": "A rule for the network ACL. Each rule allows or denies access based on the IP address, traffic direction, port, and protocol.
" } }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataCreditSpecificationDetails": { - "type": "structure", - "members": { - "CpuCredits": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The credit option for CPU usage of a T instance.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Specifies the credit option for CPU usage of a T2, T3, or T3a Amazon EC2 instance.\n
" + "com.amazonaws.securityhub#AwsEc2NetworkAclEntryList": { + "type": "list", + "member": { + "target": "com.amazonaws.securityhub#AwsEc2NetworkAclEntry" } }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataDetails": { + "com.amazonaws.securityhub#AwsEc2NetworkInterfaceAttachment": { "type": "structure", "members": { - "BlockDeviceMappingSet": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataBlockDeviceMappingSetList", - "traits": { - "smithy.api#documentation": "\n Information about a block device mapping for an Amazon EC2 launch template.\n
" - } - }, - "CapacityReservationSpecification": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataCapacityReservationSpecificationDetails", - "traits": { - "smithy.api#documentation": "\n Specifies an instance's Capacity Reservation targeting option. You can specify only \n one option at a time.\n
" - } - }, - "CpuOptions": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataCpuOptionsDetails", - "traits": { - "smithy.api#documentation": "\n Specifies the CPU options for an instance. For more information, see \n Optimize CPU options in the Amazon Elastic Compute Cloud User Guide.\n
" - } - }, - "CreditSpecification": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataCreditSpecificationDetails", - "traits": { - "smithy.api#documentation": "\n Specifies the credit option for CPU usage of a T2, T3, or T3a instance.\n
" - } - }, - "DisableApiStop": { - "target": "com.amazonaws.securityhub#Boolean", + "AttachTime": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "\n Indicates whether to enable the instance for stop protection. For more information, \n see Enable stop protection in the Amazon EC2 User Guide.\n
" + "smithy.api#documentation": "Indicates when the attachment initiated.
\nUses the date-time
format specified in RFC 3339 section 5.6, Internet\n Date/Time Format. The value cannot contain spaces. For example,\n 2020-03-22T13:22:13.933Z
.
\n If you set this parameter to true
, you can't terminate the instance using the Amazon EC2 console, CLI, or API. If set to false
, you can. \n
The identifier of the network interface attachment
" } }, - "EbsOptimized": { + "DeleteOnTermination": { "target": "com.amazonaws.securityhub#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "\n Indicates whether the instance is optimized for Amazon EBS I/O. \n
" - } - }, - "ElasticGpuSpecificationSet": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataElasticGpuSpecificationSetList", - "traits": { - "smithy.api#documentation": "\n Provides details about Elastic Graphics accelerators to associate with the instance.\n
" - } - }, - "ElasticInferenceAcceleratorSet": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataElasticInferenceAcceleratorSetList", - "traits": { - "smithy.api#documentation": "\n The Amazon Elastic Inference accelerator for the instance.\n
" - } - }, - "EnclaveOptions": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataEnclaveOptionsDetails", - "traits": { - "smithy.api#documentation": "\n Indicates whether the Amazon EC2 instance is enabled for Amazon Web Services Nitro Enclaves.\n
" - } - }, - "HibernationOptions": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataHibernationOptionsDetails", - "traits": { - "smithy.api#documentation": "\n Specifies whether your Amazon EC2 instance is configured for hibernation. \n
" + "smithy.api#documentation": "Indicates whether the network interface is deleted when the instance is\n terminated.
" } }, - "IamInstanceProfile": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataIamInstanceProfileDetails", + "DeviceIndex": { + "target": "com.amazonaws.securityhub#Integer", "traits": { - "smithy.api#documentation": "\n The name or Amazon Resource Name (ARN) of an IAM instance profile.\n
" + "smithy.api#default": 0, + "smithy.api#documentation": "The device index of the network interface attachment on the instance.
" } }, - "ImageId": { + "InstanceId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The ID of the Amazon Machine Image (AMI).\n
" + "smithy.api#documentation": "The ID of the instance.
" } }, - "InstanceInitiatedShutdownBehavior": { + "InstanceOwnerId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n Provides the options for specifying the instance initiated shutdown behavior.\n
" - } - }, - "InstanceMarketOptions": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceMarketOptionsDetails", - "traits": { - "smithy.api#documentation": "\n Specifies the market (purchasing) option for an instance.\n
" - } - }, - "InstanceRequirements": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceRequirementsDetails", - "traits": { - "smithy.api#documentation": "\n The attributes for the instance types. When you specify instance attributes, Amazon EC2 will identify instance \n types with these attributes. If you specify InstanceRequirements
, you can't specify InstanceType
.\n
The Amazon Web Services account ID of the owner of the instance.
" } }, - "InstanceType": { + "Status": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The instance type. For more information, see Instance types in the Amazon EC2 User Guide. If you specify InstanceType
, you can't \n specify InstanceRequirements
.\n
The attachment state.
\nValid values: attaching
| attached
| detaching
|\n detached
\n
Information about the network interface attachment.
" + } + }, + "com.amazonaws.securityhub#AwsEc2NetworkInterfaceDetails": { + "type": "structure", + "members": { + "Attachment": { + "target": "com.amazonaws.securityhub#AwsEc2NetworkInterfaceAttachment", "traits": { - "smithy.api#documentation": "\n The ID of the kernel.\n
" + "smithy.api#documentation": "The network interface attachment.
" } }, - "KeyName": { + "NetworkInterfaceId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The name of the key pair that allows users to connect to the instance.\n
" - } - }, - "LicenseSet": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataLicenseSetList", - "traits": { - "smithy.api#documentation": "\n Specifies a license configuration for an instance.\n
" - } - }, - "MaintenanceOptions": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataMaintenanceOptionsDetails", - "traits": { - "smithy.api#documentation": "\n The maintenance options of your instance.\n
" - } - }, - "MetadataOptions": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataMetadataOptionsDetails", - "traits": { - "smithy.api#documentation": "\n The metadata options for the instance. For more information, see Instance metadata and user data in the Amazon EC2 User Guide.\n
" + "smithy.api#documentation": "The ID of the network interface.
" } }, - "Monitoring": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataMonitoringDetails", + "SecurityGroups": { + "target": "com.amazonaws.securityhub#AwsEc2NetworkInterfaceSecurityGroupList", "traits": { - "smithy.api#documentation": "\n The monitoring for the instance.\n
" + "smithy.api#documentation": "Security groups for the network interface.
" } }, - "NetworkInterfaceSet": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataNetworkInterfaceSetList", + "SourceDestCheck": { + "target": "com.amazonaws.securityhub#Boolean", "traits": { - "smithy.api#documentation": "\n Specifies the parameters for a network interface that is attached to the instance.\n
" + "smithy.api#default": false, + "smithy.api#documentation": "Indicates whether traffic to or from the instance is validated.
" } }, - "Placement": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataPlacementDetails", + "IpV6Addresses": { + "target": "com.amazonaws.securityhub#AwsEc2NetworkInterfaceIpV6AddressList", "traits": { - "smithy.api#documentation": "\n Specifies the placement of an instance.\n
" + "smithy.api#documentation": "The IPv6 addresses associated with the network interface.
" } }, - "PrivateDnsNameOptions": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataPrivateDnsNameOptionsDetails", + "PrivateIpAddresses": { + "target": "com.amazonaws.securityhub#AwsEc2NetworkInterfacePrivateIpAddressList", "traits": { - "smithy.api#documentation": "\n The options for the instance hostname.\n
" + "smithy.api#documentation": "The private IPv4 addresses associated with the network interface.
" } }, - "RamDiskId": { + "PublicDnsName": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The ID of the RAM disk.\n
" - } - }, - "SecurityGroupIdSet": { - "target": "com.amazonaws.securityhub#NonEmptyStringList", - "traits": { - "smithy.api#documentation": "\n One or more security group IDs.\n
" - } - }, - "SecurityGroupSet": { - "target": "com.amazonaws.securityhub#NonEmptyStringList", - "traits": { - "smithy.api#documentation": "\n One or more security group names. For a nondefault VPC, you must use security group IDs instead. You cannot specify both a security group ID and security name in the same request.\n
" + "smithy.api#documentation": "The public DNS name of the network interface.
" } }, - "UserData": { + "PublicIp": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The user data to make available to the instance.\n
" + "smithy.api#documentation": "The address of the Elastic IP address bound to the network interface.
" } } }, "traits": { - "smithy.api#documentation": "\n The information to include in an Amazon Elastic Compute Cloud (Amazon EC2) launch template.\n
" + "smithy.api#documentation": "Details about the network interface
" } }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataElasticGpuSpecificationSetDetails": { + "com.amazonaws.securityhub#AwsEc2NetworkInterfaceIpV6AddressDetail": { "type": "structure", "members": { - "Type": { + "IpV6Address": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The type of Elastic Graphics accelerator. \n
" + "smithy.api#documentation": "The IPV6 address.
" } } }, "traits": { - "smithy.api#documentation": "\n Provides details about an Elastic Graphics specification for an Amazon EC2 launch template.\n
" + "smithy.api#documentation": "Provides information about an IPV6 address that is associated with the network\n interface.
" } }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataElasticGpuSpecificationSetList": { + "com.amazonaws.securityhub#AwsEc2NetworkInterfaceIpV6AddressList": { "type": "list", "member": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataElasticGpuSpecificationSetDetails" + "target": "com.amazonaws.securityhub#AwsEc2NetworkInterfaceIpV6AddressDetail" } }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataElasticInferenceAcceleratorSetDetails": { + "com.amazonaws.securityhub#AwsEc2NetworkInterfacePrivateIpAddressDetail": { "type": "structure", "members": { - "Count": { - "target": "com.amazonaws.securityhub#Integer", + "PrivateIpAddress": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The number of Elastic Inference accelerators to attach to the instance.\n
" + "smithy.api#documentation": "The IP address.
" } }, - "Type": { + "PrivateDnsName": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The type of Elastic Inference accelerator.\n
" + "smithy.api#documentation": "The private DNS name for the IP address.
" } } }, "traits": { - "smithy.api#documentation": "\n Provides details for an Amazon Elastic Inference accelerator.\n
" + "smithy.api#documentation": "Provides information about a private IPv4 address that is with the network\n interface.
" } }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataElasticInferenceAcceleratorSetList": { + "com.amazonaws.securityhub#AwsEc2NetworkInterfacePrivateIpAddressList": { "type": "list", "member": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataElasticInferenceAcceleratorSetDetails" + "target": "com.amazonaws.securityhub#AwsEc2NetworkInterfacePrivateIpAddressDetail" } }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataEnclaveOptionsDetails": { + "com.amazonaws.securityhub#AwsEc2NetworkInterfaceSecurityGroup": { "type": "structure", "members": { - "Enabled": { - "target": "com.amazonaws.securityhub#Boolean", + "GroupName": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "\n If this parameter is set to true
, the instance is enabled for Amazon Web Services Nitro Enclaves.\n
The name of the security group.
" } - } - }, - "traits": { - "smithy.api#documentation": "\n Indicates whether the instance is enabled for Amazon Web Services Nitro Enclaves.\n
" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataHibernationOptionsDetails": { - "type": "structure", - "members": { - "Configured": { - "target": "com.amazonaws.securityhub#Boolean", + }, + "GroupId": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "\n If you set this parameter to true
, the instance is enabled for hibernation.\n
The ID of the security group.
" } } }, "traits": { - "smithy.api#documentation": "\n Specifies whether your Amazon EC2 instance is configured for hibernation.\n
" + "smithy.api#documentation": "A security group associated with the network interface.
" + } + }, + "com.amazonaws.securityhub#AwsEc2NetworkInterfaceSecurityGroupList": { + "type": "list", + "member": { + "target": "com.amazonaws.securityhub#AwsEc2NetworkInterfaceSecurityGroup" } }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataIamInstanceProfileDetails": { + "com.amazonaws.securityhub#AwsEc2SecurityGroupDetails": { "type": "structure", "members": { - "Arn": { + "GroupName": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "The name of the security group.
" + } + }, + "GroupId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The Amazon Resource Name (ARN) of the instance profile.\n
" + "smithy.api#documentation": "The ID of the security group.
" } }, - "Name": { + "OwnerId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The name of the instance profile.\n
" + "smithy.api#documentation": "The Amazon Web Services account ID of the owner of the security group.
" } - } - }, - "traits": { - "smithy.api#documentation": "\n Provides details for an Identity and Access Management (IAM) instance profile, which is a container for an IAM role for your instance.\n
" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceMarketOptionsDetails": { - "type": "structure", - "members": { - "MarketType": { + }, + "VpcId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The market type.\n
" + "smithy.api#documentation": "[VPC only] The ID of the VPC for the security group.
" + } + }, + "IpPermissions": { + "target": "com.amazonaws.securityhub#AwsEc2SecurityGroupIpPermissionList", + "traits": { + "smithy.api#documentation": "The inbound rules associated with the security group.
" } }, - "SpotOptions": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceMarketOptionsSpotOptionsDetails", + "IpPermissionsEgress": { + "target": "com.amazonaws.securityhub#AwsEc2SecurityGroupIpPermissionList", "traits": { - "smithy.api#documentation": "\n The options for Spot Instances.\n
" + "smithy.api#documentation": "[VPC only] The outbound rules associated with the security group.
" } } }, "traits": { - "smithy.api#documentation": "\n Provides details about the market (purchasing) option for an Amazon EC2 instance.\n
" + "smithy.api#documentation": "Details about an Amazon EC2 security group.
" } }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceMarketOptionsSpotOptionsDetails": { + "com.amazonaws.securityhub#AwsEc2SecurityGroupIpPermission": { "type": "structure", "members": { - "BlockDurationMinutes": { + "IpProtocol": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "The IP protocol name (tcp
, udp
, icmp
,\n icmpv6
) or number.
[VPC only] Use -1
to specify all protocols.
When authorizing security group rules, specifying -1
\n or a protocol number other than tcp
, udp
, icmp
, or\n icmpv6
allows traffic on all ports, regardless of any port range you\n specify.
For tcp
, udp
, and icmp
, you must specify a port\n range.
For icmpv6
, the port range is optional. If you omit the port range, traffic\n for all types and codes is allowed.
\n Deprecated.\n
" + "smithy.api#documentation": "The start of the port range for the TCP and UDP protocols, or an ICMP/ICMPv6 type\n number.
\nA value of -1 indicates all ICMP/ICMPv6 types. If you specify all ICMP/ICMPv6 types, you\n must specify all codes.
" } }, - "InstanceInterruptionBehavior": { - "target": "com.amazonaws.securityhub#NonEmptyString", + "ToPort": { + "target": "com.amazonaws.securityhub#Integer", "traits": { - "smithy.api#documentation": "\n The behavior when a Spot Instance is interrupted.\n
" + "smithy.api#default": 0, + "smithy.api#documentation": "The end of the port range for the TCP and UDP protocols, or an ICMP/ICMPv6 code.
\nA value of -1
indicates all ICMP/ICMPv6 codes. If you specify all ICMP/ICMPv6 types, you must specify all\n codes.
\n The maximum hourly price you're willing to pay for the Spot Instances.\n
" + "smithy.api#documentation": "The security group and Amazon Web Services account ID pairs.
" } }, - "SpotInstanceType": { - "target": "com.amazonaws.securityhub#NonEmptyString", + "IpRanges": { + "target": "com.amazonaws.securityhub#AwsEc2SecurityGroupIpRangeList", "traits": { - "smithy.api#documentation": "\n The Spot Instance request type.\n
" + "smithy.api#documentation": "The IPv4 ranges.
" } }, - "ValidUntil": { - "target": "com.amazonaws.securityhub#NonEmptyString", + "Ipv6Ranges": { + "target": "com.amazonaws.securityhub#AwsEc2SecurityGroupIpv6RangeList", + "traits": { + "smithy.api#documentation": "The IPv6 ranges.
" + } + }, + "PrefixListIds": { + "target": "com.amazonaws.securityhub#AwsEc2SecurityGroupPrefixListIdList", "traits": { - "smithy.api#documentation": "\n The end date of the request, in UTC format (YYYY-MM-DDTHH:MM:SSZ), for persistent requests.\n
" + "smithy.api#documentation": "[VPC only] The prefix list IDs for an Amazon Web Services service. With outbound rules, this is the Amazon Web Services\n service to access through a VPC endpoint from instances associated with the security\n group.
" } } }, "traits": { - "smithy.api#documentation": "\n Provides details about the market (purchasing) options for Spot Instances.\n
" + "smithy.api#documentation": "An IP permission for an EC2 security group.
" + } + }, + "com.amazonaws.securityhub#AwsEc2SecurityGroupIpPermissionList": { + "type": "list", + "member": { + "target": "com.amazonaws.securityhub#AwsEc2SecurityGroupIpPermission" } }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceRequirementsAcceleratorCountDetails": { + "com.amazonaws.securityhub#AwsEc2SecurityGroupIpRange": { "type": "structure", "members": { - "Max": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The maximum number of accelerators. If this parameter isn't specified, there's no maximum limit. To exclude accelerator-enabled instance types, set Max
to 0
.\n
\n The minimum number of accelerators. If this parameter isn't specified, there's no minimum limit.\n
" + "smithy.api#documentation": "The IPv4 CIDR range. You can specify either a CIDR range or a source security group, but\n not both. To specify a single IPv4 address, use the /32 prefix length.
" } } }, "traits": { - "smithy.api#documentation": "\n The minimum and maximum number of accelerators (GPUs, FPGAs, or Amazon Web Services Inferentia chips) on an Amazon EC2 instance.\n
" + "smithy.api#documentation": "A range of IPv4 addresses.
" } }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceRequirementsAcceleratorTotalMemoryMiBDetails": { + "com.amazonaws.securityhub#AwsEc2SecurityGroupIpRangeList": { + "type": "list", + "member": { + "target": "com.amazonaws.securityhub#AwsEc2SecurityGroupIpRange" + } + }, + "com.amazonaws.securityhub#AwsEc2SecurityGroupIpv6Range": { "type": "structure", "members": { - "Max": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The maximum amount of memory, in MiB. If this parameter isn't specified, there's no maximum limit.\n
" - } - }, - "Min": { - "target": "com.amazonaws.securityhub#Integer", + "CidrIpv6": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The minimum amount of memory, in MiB. If 0
is specified, there's no minimum limit.\n
The IPv6 CIDR range. You can specify either a CIDR range or a source security group, but\n not both. To specify a single IPv6 address, use the /128 prefix length.
" } } }, "traits": { - "smithy.api#documentation": "\n The minimum and maximum amount of memory, in MiB, for the accelerators on an Amazon EC2 instance.\n
" + "smithy.api#documentation": "A range of IPv6 addresses.
" + } + }, + "com.amazonaws.securityhub#AwsEc2SecurityGroupIpv6RangeList": { + "type": "list", + "member": { + "target": "com.amazonaws.securityhub#AwsEc2SecurityGroupIpv6Range" } }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceRequirementsBaselineEbsBandwidthMbpsDetails": { + "com.amazonaws.securityhub#AwsEc2SecurityGroupPrefixListId": { "type": "structure", "members": { - "Max": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The maximum baseline bandwidth, in Mbps. If this parameter is omitted, there's no maximum limit.\n
" - } - }, - "Min": { - "target": "com.amazonaws.securityhub#Integer", + "PrefixListId": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The minimum baseline bandwidth, in Mbps. If this parameter is omitted, there's no minimum limit.\n
" + "smithy.api#documentation": "The ID of the prefix.
" } } }, "traits": { - "smithy.api#documentation": "\n The minimum and maximum baseline bandwidth to Amazon Elastic Block Store (Amazon EBS), in Mbps. For more information, \n see Amazon EBS–optimized instances in the Amazon EC2 User Guide.\n
" + "smithy.api#documentation": "A prefix list ID.
" + } + }, + "com.amazonaws.securityhub#AwsEc2SecurityGroupPrefixListIdList": { + "type": "list", + "member": { + "target": "com.amazonaws.securityhub#AwsEc2SecurityGroupPrefixListId" } }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceRequirementsDetails": { + "com.amazonaws.securityhub#AwsEc2SecurityGroupUserIdGroupPair": { "type": "structure", "members": { - "AcceleratorCount": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceRequirementsAcceleratorCountDetails", + "GroupId": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The minimum and maximum number of accelerators (GPUs, FPGAs, or Amazon Web Services Inferentia chips) on an instance.\n
" + "smithy.api#documentation": "The ID of the security group.
" } }, - "AcceleratorManufacturers": { - "target": "com.amazonaws.securityhub#NonEmptyStringList", + "GroupName": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "Indicates whether instance types must have accelerators by specific manufacturers.\n
" + "smithy.api#documentation": "The name of the security group.
" } }, - "AcceleratorNames": { - "target": "com.amazonaws.securityhub#NonEmptyStringList", + "PeeringStatus": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The accelerators that must be on the instance type.\n
" + "smithy.api#documentation": "The status of a VPC peering connection, if applicable.
" } }, - "AcceleratorTotalMemoryMiB": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceRequirementsAcceleratorTotalMemoryMiBDetails", + "UserId": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The minimum and maximum amount of total accelerator memory, in MiB.\n
" + "smithy.api#documentation": "The ID of an Amazon Web Services account.
\nFor a referenced security group in another VPC, the account ID of the referenced\n security group is returned in the response. If the referenced security group is deleted,\n this value is not returned.
\n[EC2-Classic] Required when adding or removing rules that reference a security group in\n another VPC.
" } }, - "AcceleratorTypes": { - "target": "com.amazonaws.securityhub#NonEmptyStringList", + "VpcId": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "The accelerator types that must be on the instance type.\n
" + "smithy.api#documentation": "The ID of the VPC for the referenced security group, if applicable.
" } }, - "BareMetal": { + "VpcPeeringConnectionId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "Indicates whether bare metal instance types must be included, excluded, or required.\n
" + "smithy.api#documentation": "The ID of the VPC peering connection, if applicable.
" } - }, - "BaselineEbsBandwidthMbps": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceRequirementsBaselineEbsBandwidthMbpsDetails", + } + }, + "traits": { + "smithy.api#documentation": "A relationship between a security group and a user.
" + } + }, + "com.amazonaws.securityhub#AwsEc2SecurityGroupUserIdGroupPairList": { + "type": "list", + "member": { + "target": "com.amazonaws.securityhub#AwsEc2SecurityGroupUserIdGroupPair" + } + }, + "com.amazonaws.securityhub#AwsEc2SubnetDetails": { + "type": "structure", + "members": { + "AssignIpv6AddressOnCreation": { + "target": "com.amazonaws.securityhub#Boolean", "traits": { - "smithy.api#documentation": "\n The minimum and maximum baseline bandwidth to Amazon EBS, in Mbps. For more information, see Amazon EBS optimized instances in the Amazon EC2 User Guide.\n
" + "smithy.api#default": false, + "smithy.api#documentation": "Whether to assign an IPV6 address to a network interface that is created in this subnet.
" } }, - "BurstablePerformance": { + "AvailabilityZone": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n Indicates whether burstable performance T instance types are included, excluded, or required. For more information, Burstable performance instances in the Amazon EC2 User Guide.\n
" - } - }, - "CpuManufacturers": { - "target": "com.amazonaws.securityhub#NonEmptyStringList", - "traits": { - "smithy.api#documentation": "\n The CPU manufacturers to include.\n
" + "smithy.api#documentation": "The Availability Zone for the subnet.
" } }, - "ExcludedInstanceTypes": { - "target": "com.amazonaws.securityhub#NonEmptyStringList", + "AvailabilityZoneId": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The instance types to exclude.\n
" + "smithy.api#documentation": "The identifier of the Availability Zone for the subnet.
" } }, - "InstanceGenerations": { - "target": "com.amazonaws.securityhub#NonEmptyStringList", + "AvailableIpAddressCount": { + "target": "com.amazonaws.securityhub#Integer", "traits": { - "smithy.api#documentation": "\n Indicates whether current or previous generation instance types are included.\n
" + "smithy.api#default": 0, + "smithy.api#documentation": "The number of available IPV4 addresses in the subnet. Does not include addresses for stopped instances.
" } }, - "LocalStorage": { + "CidrBlock": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n Indicates whether instance types with instance store volumes are included, excluded, or required. For more information, see Amazon EC2 instance store in the Amazon EC2 User Guide.\n
" - } - }, - "LocalStorageTypes": { - "target": "com.amazonaws.securityhub#NonEmptyStringList", - "traits": { - "smithy.api#documentation": "\n The type of local storage that is required.\n
" + "smithy.api#documentation": "The IPV4 CIDR block that is assigned to the subnet.
" } }, - "MemoryGiBPerVCpu": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceRequirementsMemoryGiBPerVCpuDetails", + "DefaultForAz": { + "target": "com.amazonaws.securityhub#Boolean", "traits": { - "smithy.api#documentation": "\n The minimum and maximum amount of memory per vCPU, in GiB.\n
" + "smithy.api#default": false, + "smithy.api#documentation": "Whether this subnet is the default subnet for the Availability Zone.
" } }, - "MemoryMiB": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceRequirementsMemoryMiBDetails", + "MapPublicIpOnLaunch": { + "target": "com.amazonaws.securityhub#Boolean", "traits": { - "smithy.api#documentation": "\n The minimum and maximum amount of memory, in MiB.\n
" + "smithy.api#default": false, + "smithy.api#documentation": "Whether instances in this subnet receive a public IP address.
" } }, - "NetworkInterfaceCount": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceRequirementsNetworkInterfaceCountDetails", + "OwnerId": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The minimum and maximum number of network interfaces.\n
" + "smithy.api#documentation": "The identifier of the Amazon Web Services account that owns the subnet.
" } }, - "OnDemandMaxPricePercentageOverLowestPrice": { - "target": "com.amazonaws.securityhub#Integer", + "State": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The price protection threshold for On-Demand Instances. This is the maximum you’ll pay for an On-Demand Instance, expressed as a percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.
\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.
\nA high value, such as 999999
, turns off price protection.
The current state of the subnet. Valid values are available
or pending
.
\n Indicates whether instance types must support hibernation for On-Demand Instances.\n
" + "smithy.api#documentation": "The ARN of the subnet.
" } }, - "SpotMaxPricePercentageOverLowestPrice": { - "target": "com.amazonaws.securityhub#Integer", + "SubnetId": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The price protection threshold for Spot Instances. This is the maximum you’ll pay for a Spot Instance, expressed as a \n percentage above the least expensive current generation M, C, or R instance type with your specified attributes. When \n Amazon EC2 selects instance types with your attributes, it excludes instance types priced above your threshold.\n
\nThe parameter accepts an integer, which Amazon EC2 interprets as a percentage.
\nA high value, such as 999999
, turns off price protection.
The identifier of the subnet.
" } }, - "TotalLocalStorageGB": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceRequirementsTotalLocalStorageGBDetails", + "VpcId": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The minimum and maximum amount of total local storage, in GB.\n
" + "smithy.api#documentation": "The identifier of the VPC that contains the subnet.
" } }, - "VCpuCount": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceRequirementsVCpuCountDetails", + "Ipv6CidrBlockAssociationSet": { + "target": "com.amazonaws.securityhub#Ipv6CidrBlockAssociationList", "traits": { - "smithy.api#documentation": "\n The minimum and maximum number of vCPUs.\n
" + "smithy.api#documentation": "The IPV6 CIDR blocks that are associated with the subnet.
" } } }, "traits": { - "smithy.api#documentation": "\n The attributes for the Amazon EC2 instance types.\n
" + "smithy.api#documentation": "Contains information about a subnet in Amazon EC2.
" } }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceRequirementsMemoryGiBPerVCpuDetails": { + "com.amazonaws.securityhub#AwsEc2TransitGatewayDetails": { "type": "structure", "members": { - "Max": { - "target": "com.amazonaws.securityhub#Double", + "Id": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The maximum amount of memory per vCPU, in GiB. If this parameter is omitted, there's no maximum limit.\n
" - } - }, - "Min": { - "target": "com.amazonaws.securityhub#Double", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The minimum amount of memory per vCPU, in GiB. If this parameter is omitted, there's no maximum limit.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n The minimum and maximum amount of memory per vCPU, in GiB.\n
" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceRequirementsMemoryMiBDetails": { - "type": "structure", - "members": { - "Max": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The maximum amount of memory, in MiB.\n
" - } - }, - "Min": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The minimum amount of memory, in MiB. \n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n The minimum and maximum amount of memory, in MiB, for an Amazon EC2 instance.\n
" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceRequirementsNetworkInterfaceCountDetails": { - "type": "structure", - "members": { - "Max": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The maximum number of network interfaces. \n
" - } - }, - "Min": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The minimum number of network interfaces.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n The minimum and maximum number of network interfaces to be attached to an Amazon EC2 instance.\n
" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceRequirementsTotalLocalStorageGBDetails": { - "type": "structure", - "members": { - "Max": { - "target": "com.amazonaws.securityhub#Double", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The maximum amount of total local storage, in GB.\n
" - } - }, - "Min": { - "target": "com.amazonaws.securityhub#Double", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The minimum amount of total local storage, in GB.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n The minimum and maximum amount of total local storage, in GB, that an Amazon EC2 instance uses.\n
" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataInstanceRequirementsVCpuCountDetails": { - "type": "structure", - "members": { - "Max": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The maximum number of vCPUs. \n
" - } - }, - "Min": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The minimum number of vCPUs.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n The minimum and maximum number of vCPUs for an Amazon EC2 instance.\n
" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataLicenseSetDetails": { - "type": "structure", - "members": { - "LicenseConfigurationArn": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The Amazon Resource Name (ARN) of the license configuration.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Provides details about the license configuration for an Amazon EC2 instance.\n
" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataLicenseSetList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataLicenseSetDetails" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataMaintenanceOptionsDetails": { - "type": "structure", - "members": { - "AutoRecovery": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n Disables the automatic recovery behavior of your instance or sets it to default.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n The maintenance options of an Amazon EC2 instance.\n
" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataMetadataOptionsDetails": { - "type": "structure", - "members": { - "HttpEndpoint": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n Enables or disables the HTTP metadata endpoint on your instances. If the parameter is not specified, the default state is enabled, and you won’t be able to access your instance metadata.\n
" - } - }, - "HttpProtocolIpv6": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n Enables or disables the IPv6 endpoint for the instance metadata service.\n
" - } - }, - "HttpTokens": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The state of token usage for your instance metadata requests.\n
" - } - }, - "HttpPutResponseHopLimit": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The desired HTTP PUT response hop limit for instance metadata requests. The larger \n the number, the further instance metadata requests can travel.\n
" - } - }, - "InstanceMetadataTags": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n When set to enabled
, this parameter allows access to instance tags from the instance metadata. When set to disabled
, it turns off access to instance tags from the instance metadata. For more information, see \nWork with instance tags in instance metadata in the Amazon EC2 User Guide.\n
\n Specifies the metadata options for an Amazon EC2 instance.\n
" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataMonitoringDetails": { - "type": "structure", - "members": { - "Enabled": { - "target": "com.amazonaws.securityhub#Boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "\n Enables detailed monitoring when true
is specified. Otherwise, basic monitoring is enabled. \n For more information about detailed monitoring, see \n Enable or turn off detailed monitoring for your instances in the Amazon EC2 User Guide. \n
\n The monitoring for an Amazon EC2 instance.\n
" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataNetworkInterfaceSetDetails": { - "type": "structure", - "members": { - "AssociateCarrierIpAddress": { - "target": "com.amazonaws.securityhub#Boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "\n Indicates whether to associate a Carrier IP address with eth0 for a new network interface. \n You use this option when you launch an instance in a Wavelength Zone and want to \n associate a Carrier IP address with the network interface. For more information, see \n Carrier IP address in the Wavelength Developer Guide.\n
" - } - }, - "AssociatePublicIpAddress": { - "target": "com.amazonaws.securityhub#Boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "\n Associates a public IPv4 address with eth0 for a new network interface.\n
" - } - }, - "DeleteOnTermination": { - "target": "com.amazonaws.securityhub#Boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "\n Indicates whether the network interface is deleted when the instance is terminated.\n
" - } - }, - "Description": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n A description for the network interface.\n
" - } - }, - "DeviceIndex": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The device index for the network interface attachment.\n
" - } - }, - "Groups": { - "target": "com.amazonaws.securityhub#NonEmptyStringList", - "traits": { - "smithy.api#documentation": "\n The IDs of one or more security groups.\n
" - } - }, - "InterfaceType": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The type of network interface.\n
" - } - }, - "Ipv4PrefixCount": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The number of IPv4 prefixes to be automatically assigned to the network interface. \n You cannot use this option if you use the Ipv4Prefixes
option.\n
\n One or more IPv4 prefixes to be assigned to the network interface. You cannot use \n this option if you use the Ipv4PrefixCount
option.\n
\n The number of IPv6 addresses to assign to a network interface. Amazon EC2 \n automatically selects the IPv6 addresses from the subnet range. You can't use this \n option if you use Ipv6Addresses
.\n
\n One or more specific IPv6 addresses from the IPv6 CIDR block range of your subnet. \n You can't use this option if you use Ipv6AddressCount
.\n
\n The number of IPv6 prefixes to be automatically assigned to the network interface. \n You cannot use this option if you use the Ipv6Prefix
option.\n
\n One or more IPv6 prefixes to be assigned to the network interface. You cannot use \n this option if you use the Ipv6PrefixCount
option.\n
\n The index of the network card. Some instance types support multiple network cards. \n The primary network interface must be assigned to network card index 0
. The default \n is network card index 0
.\n
\n The ID of the network interface.\n
" - } - }, - "PrivateIpAddress": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The primary private IPv4 address of the network interface.\n
" - } - }, - "PrivateIpAddresses": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataNetworkInterfaceSetPrivateIpAddressesList", - "traits": { - "smithy.api#documentation": "\n One or more private IPv4 addresses.\n
" - } - }, - "SecondaryPrivateIpAddressCount": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The number of secondary private IPv4 addresses to assign to a network interface.\n
" - } - }, - "SubnetId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The ID of the subnet for the network interface.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n One or more network interfaces to attach to an Amazon EC2 instance. If you specify a network interface, you must specify \n security groups and subnets as part of the network interface.\n
" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataNetworkInterfaceSetIpv4PrefixesDetails": { - "type": "structure", - "members": { - "Ipv4Prefix": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The IPv4 prefix. For more information, see Assigning prefixes to Amazon EC2 network interfaces in the Amazon Elastic Compute Cloud User Guide.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Provides details on one or more IPv4 prefixes for a network interface. \n
" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataNetworkInterfaceSetIpv4PrefixesList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataNetworkInterfaceSetIpv4PrefixesDetails" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataNetworkInterfaceSetIpv6AddressesDetails": { - "type": "structure", - "members": { - "Ipv6Address": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n One or more specific IPv6 addresses from the IPv6 CIDR block range of your subnet. \n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Specifies an IPv6 address in an Amazon EC2 launch template.\n
" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataNetworkInterfaceSetIpv6AddressesList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataNetworkInterfaceSetIpv6AddressesDetails" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataNetworkInterfaceSetIpv6PrefixesDetails": { - "type": "structure", - "members": { - "Ipv6Prefix": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The IPv6 prefix.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Provides details on one or more IPv6 prefixes to be assigned to the network interface.\n
" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataNetworkInterfaceSetIpv6PrefixesList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataNetworkInterfaceSetIpv6PrefixesDetails" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataNetworkInterfaceSetList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataNetworkInterfaceSetDetails" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataNetworkInterfaceSetPrivateIpAddressesDetails": { - "type": "structure", - "members": { - "Primary": { - "target": "com.amazonaws.securityhub#Boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "\n Indicates whether the private IPv4 address is the primary private IPv4 address. Only one IPv4 address can be designated as primary.\n
" - } - }, - "PrivateIpAddress": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The private IPv4 address.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n One or more private IPv4 addresses.\n
" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataNetworkInterfaceSetPrivateIpAddressesList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataNetworkInterfaceSetPrivateIpAddressesDetails" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataPlacementDetails": { - "type": "structure", - "members": { - "Affinity": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The affinity setting for an instance on an EC2 Dedicated Host.\n
" - } - }, - "AvailabilityZone": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The Availability Zone for the instance.\n
" - } - }, - "GroupName": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The name of the placement group for the instance.\n
" - } - }, - "HostId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The ID of the Dedicated Host for the instance.\n
" - } - }, - "HostResourceGroupArn": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The Amazon Resource Name (ARN) of the host resource group in which to launch the instances. \n
" - } - }, - "PartitionNumber": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The number of the partition the instance should launch in.\n
" - } - }, - "SpreadDomain": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n Reserved for future use.\n
" - } - }, - "Tenancy": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The tenancy of the instance (if the instance is running in a VPC). An instance with a tenancy of dedicated runs on single-tenant hardware.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Provides details about the placement of an Amazon EC2 instance.\n
" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataPrivateDnsNameOptionsDetails": { - "type": "structure", - "members": { - "EnableResourceNameDnsAAAARecord": { - "target": "com.amazonaws.securityhub#Boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "\n Indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records.\n
" - } - }, - "EnableResourceNameDnsARecord": { - "target": "com.amazonaws.securityhub#Boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "\n Indicates whether to respond to DNS queries for instance hostnames with DNS A records.\n
" - } - }, - "HostnameType": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The type of hostname for EC2 instances. \n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Describes the options for Amazon EC2 instance hostnames.\n
" - } - }, - "com.amazonaws.securityhub#AwsEc2LaunchTemplateDetails": { - "type": "structure", - "members": { - "LaunchTemplateName": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n A name for the launch template.\n
" - } - }, - "Id": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n An ID for the launch template.\n
" - } - }, - "LaunchTemplateData": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDataDetails", - "traits": { - "smithy.api#documentation": "\n The information to include in the launch template.\n
" - } - }, - "DefaultVersionNumber": { - "target": "com.amazonaws.securityhub#Long", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The default version of the launch template.\n
" - } - }, - "LatestVersionNumber": { - "target": "com.amazonaws.securityhub#Long", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The latest version of the launch template.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Specifies the properties for creating an Amazon Elastic Compute Cloud (Amazon EC2) launch template.\n
" - } - }, - "com.amazonaws.securityhub#AwsEc2NetworkAclAssociation": { - "type": "structure", - "members": { - "NetworkAclAssociationId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The identifier of the association between the network ACL and the subnet.
" - } - }, - "NetworkAclId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The identifier of the network ACL.
" - } - }, - "SubnetId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The identifier of the subnet that is associated with the network ACL.
" - } - } - }, - "traits": { - "smithy.api#documentation": "An association between the network ACL and a subnet.
" - } - }, - "com.amazonaws.securityhub#AwsEc2NetworkAclAssociationList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsEc2NetworkAclAssociation" - } - }, - "com.amazonaws.securityhub#AwsEc2NetworkAclDetails": { - "type": "structure", - "members": { - "IsDefault": { - "target": "com.amazonaws.securityhub#Boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "Whether this is the default network ACL for the VPC.
" - } - }, - "NetworkAclId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The identifier of the network ACL.
" - } - }, - "OwnerId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The identifier of the Amazon Web Services account that owns the network ACL.
" - } - }, - "VpcId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The identifier of the VPC for the network ACL.
" - } - }, - "Associations": { - "target": "com.amazonaws.securityhub#AwsEc2NetworkAclAssociationList", - "traits": { - "smithy.api#documentation": "Associations between the network ACL and subnets.
" - } - }, - "Entries": { - "target": "com.amazonaws.securityhub#AwsEc2NetworkAclEntryList", - "traits": { - "smithy.api#documentation": "The set of rules in the network ACL.
" - } - } - }, - "traits": { - "smithy.api#documentation": "Contains details about an Amazon EC2 network access control list (ACL).
" - } - }, - "com.amazonaws.securityhub#AwsEc2NetworkAclEntry": { - "type": "structure", - "members": { - "CidrBlock": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The IPV4 network range for which to deny or allow access.
" - } - }, - "Egress": { - "target": "com.amazonaws.securityhub#Boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "Whether the rule is an egress rule. An egress rule is a rule that applies to traffic that leaves the subnet.
" - } - }, - "IcmpTypeCode": { - "target": "com.amazonaws.securityhub#IcmpTypeCode", - "traits": { - "smithy.api#documentation": "The Internet Control Message Protocol (ICMP) type and code for which to deny or allow access.
" - } - }, - "Ipv6CidrBlock": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The IPV6 network range for which to deny or allow access.
" - } - }, - "PortRange": { - "target": "com.amazonaws.securityhub#PortRangeFromTo", - "traits": { - "smithy.api#documentation": "For TCP or UDP protocols, the range of ports that the rule applies to.
" - } - }, - "Protocol": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The protocol that the rule applies to. To deny or allow access to all protocols, use the\n value -1
.
Whether the rule is used to allow access or deny access.
" - } - }, - "RuleNumber": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "The rule number. The rules are processed in order by their number.
" - } - } - }, - "traits": { - "smithy.api#documentation": "A rule for the network ACL. Each rule allows or denies access based on the IP address, traffic direction, port, and protocol.
" - } - }, - "com.amazonaws.securityhub#AwsEc2NetworkAclEntryList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsEc2NetworkAclEntry" - } - }, - "com.amazonaws.securityhub#AwsEc2NetworkInterfaceAttachment": { - "type": "structure", - "members": { - "AttachTime": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "Indicates when the attachment initiated.
\nUses the date-time
format specified in RFC 3339 section 5.6, Internet\n Date/Time Format. The value cannot contain spaces. For example,\n 2020-03-22T13:22:13.933Z
.
The identifier of the network interface attachment
" - } - }, - "DeleteOnTermination": { - "target": "com.amazonaws.securityhub#Boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "Indicates whether the network interface is deleted when the instance is\n terminated.
" - } - }, - "DeviceIndex": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "The device index of the network interface attachment on the instance.
" - } - }, - "InstanceId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The ID of the instance.
" - } - }, - "InstanceOwnerId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The Amazon Web Services account ID of the owner of the instance.
" - } - }, - "Status": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The attachment state.
\nValid values: attaching
| attached
| detaching
|\n detached
\n
Information about the network interface attachment.
" - } - }, - "com.amazonaws.securityhub#AwsEc2NetworkInterfaceDetails": { - "type": "structure", - "members": { - "Attachment": { - "target": "com.amazonaws.securityhub#AwsEc2NetworkInterfaceAttachment", - "traits": { - "smithy.api#documentation": "The network interface attachment.
" - } - }, - "NetworkInterfaceId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The ID of the network interface.
" - } - }, - "SecurityGroups": { - "target": "com.amazonaws.securityhub#AwsEc2NetworkInterfaceSecurityGroupList", - "traits": { - "smithy.api#documentation": "Security groups for the network interface.
" - } - }, - "SourceDestCheck": { - "target": "com.amazonaws.securityhub#Boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "Indicates whether traffic to or from the instance is validated.
" - } - }, - "IpV6Addresses": { - "target": "com.amazonaws.securityhub#AwsEc2NetworkInterfaceIpV6AddressList", - "traits": { - "smithy.api#documentation": "The IPv6 addresses associated with the network interface.
" - } - }, - "PrivateIpAddresses": { - "target": "com.amazonaws.securityhub#AwsEc2NetworkInterfacePrivateIpAddressList", - "traits": { - "smithy.api#documentation": "The private IPv4 addresses associated with the network interface.
" - } - }, - "PublicDnsName": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The public DNS name of the network interface.
" - } - }, - "PublicIp": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The address of the Elastic IP address bound to the network interface.
" - } - } - }, - "traits": { - "smithy.api#documentation": "Details about the network interface
" - } - }, - "com.amazonaws.securityhub#AwsEc2NetworkInterfaceIpV6AddressDetail": { - "type": "structure", - "members": { - "IpV6Address": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The IPV6 address.
" - } - } - }, - "traits": { - "smithy.api#documentation": "Provides information about an IPV6 address that is associated with the network\n interface.
" - } - }, - "com.amazonaws.securityhub#AwsEc2NetworkInterfaceIpV6AddressList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsEc2NetworkInterfaceIpV6AddressDetail" - } - }, - "com.amazonaws.securityhub#AwsEc2NetworkInterfacePrivateIpAddressDetail": { - "type": "structure", - "members": { - "PrivateIpAddress": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The IP address.
" - } - }, - "PrivateDnsName": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The private DNS name for the IP address.
" - } - } - }, - "traits": { - "smithy.api#documentation": "Provides information about a private IPv4 address that is with the network\n interface.
" - } - }, - "com.amazonaws.securityhub#AwsEc2NetworkInterfacePrivateIpAddressList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsEc2NetworkInterfacePrivateIpAddressDetail" - } - }, - "com.amazonaws.securityhub#AwsEc2NetworkInterfaceSecurityGroup": { - "type": "structure", - "members": { - "GroupName": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The name of the security group.
" - } - }, - "GroupId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The ID of the security group.
" - } - } - }, - "traits": { - "smithy.api#documentation": "A security group associated with the network interface.
" - } - }, - "com.amazonaws.securityhub#AwsEc2NetworkInterfaceSecurityGroupList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsEc2NetworkInterfaceSecurityGroup" - } - }, - "com.amazonaws.securityhub#AwsEc2SecurityGroupDetails": { - "type": "structure", - "members": { - "GroupName": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The name of the security group.
" - } - }, - "GroupId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The ID of the security group.
" - } - }, - "OwnerId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The Amazon Web Services account ID of the owner of the security group.
" - } - }, - "VpcId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "[VPC only] The ID of the VPC for the security group.
" - } - }, - "IpPermissions": { - "target": "com.amazonaws.securityhub#AwsEc2SecurityGroupIpPermissionList", - "traits": { - "smithy.api#documentation": "The inbound rules associated with the security group.
" - } - }, - "IpPermissionsEgress": { - "target": "com.amazonaws.securityhub#AwsEc2SecurityGroupIpPermissionList", - "traits": { - "smithy.api#documentation": "[VPC only] The outbound rules associated with the security group.
" - } - } - }, - "traits": { - "smithy.api#documentation": "Details about an Amazon EC2 security group.
" - } - }, - "com.amazonaws.securityhub#AwsEc2SecurityGroupIpPermission": { - "type": "structure", - "members": { - "IpProtocol": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The IP protocol name (tcp
, udp
, icmp
,\n icmpv6
) or number.
[VPC only] Use -1
to specify all protocols.
When authorizing security group rules, specifying -1
\n or a protocol number other than tcp
, udp
, icmp
, or\n icmpv6
allows traffic on all ports, regardless of any port range you\n specify.
For tcp
, udp
, and icmp
, you must specify a port\n range.
For icmpv6
, the port range is optional. If you omit the port range, traffic\n for all types and codes is allowed.
The start of the port range for the TCP and UDP protocols, or an ICMP/ICMPv6 type\n number.
\nA value of -1 indicates all ICMP/ICMPv6 types. If you specify all ICMP/ICMPv6 types, you\n must specify all codes.
" - } - }, - "ToPort": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "The end of the port range for the TCP and UDP protocols, or an ICMP/ICMPv6 code.
\nA value of -1
indicates all ICMP/ICMPv6 codes. If you specify all ICMP/ICMPv6 types, you must specify all\n codes.
The security group and Amazon Web Services account ID pairs.
" - } - }, - "IpRanges": { - "target": "com.amazonaws.securityhub#AwsEc2SecurityGroupIpRangeList", - "traits": { - "smithy.api#documentation": "The IPv4 ranges.
" - } - }, - "Ipv6Ranges": { - "target": "com.amazonaws.securityhub#AwsEc2SecurityGroupIpv6RangeList", - "traits": { - "smithy.api#documentation": "The IPv6 ranges.
" - } - }, - "PrefixListIds": { - "target": "com.amazonaws.securityhub#AwsEc2SecurityGroupPrefixListIdList", - "traits": { - "smithy.api#documentation": "[VPC only] The prefix list IDs for an Amazon Web Services service. With outbound rules, this is the Amazon Web Services\n service to access through a VPC endpoint from instances associated with the security\n group.
" - } - } - }, - "traits": { - "smithy.api#documentation": "An IP permission for an EC2 security group.
" - } - }, - "com.amazonaws.securityhub#AwsEc2SecurityGroupIpPermissionList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsEc2SecurityGroupIpPermission" - } - }, - "com.amazonaws.securityhub#AwsEc2SecurityGroupIpRange": { - "type": "structure", - "members": { - "CidrIp": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The IPv4 CIDR range. You can specify either a CIDR range or a source security group, but\n not both. To specify a single IPv4 address, use the /32 prefix length.
" - } - } - }, - "traits": { - "smithy.api#documentation": "A range of IPv4 addresses.
" - } - }, - "com.amazonaws.securityhub#AwsEc2SecurityGroupIpRangeList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsEc2SecurityGroupIpRange" - } - }, - "com.amazonaws.securityhub#AwsEc2SecurityGroupIpv6Range": { - "type": "structure", - "members": { - "CidrIpv6": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The IPv6 CIDR range. You can specify either a CIDR range or a source security group, but\n not both. To specify a single IPv6 address, use the /128 prefix length.
" - } - } - }, - "traits": { - "smithy.api#documentation": "A range of IPv6 addresses.
" - } - }, - "com.amazonaws.securityhub#AwsEc2SecurityGroupIpv6RangeList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsEc2SecurityGroupIpv6Range" - } - }, - "com.amazonaws.securityhub#AwsEc2SecurityGroupPrefixListId": { - "type": "structure", - "members": { - "PrefixListId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The ID of the prefix.
" - } - } - }, - "traits": { - "smithy.api#documentation": "A prefix list ID.
" - } - }, - "com.amazonaws.securityhub#AwsEc2SecurityGroupPrefixListIdList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsEc2SecurityGroupPrefixListId" - } - }, - "com.amazonaws.securityhub#AwsEc2SecurityGroupUserIdGroupPair": { - "type": "structure", - "members": { - "GroupId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The ID of the security group.
" - } - }, - "GroupName": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The name of the security group.
" - } - }, - "PeeringStatus": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The status of a VPC peering connection, if applicable.
" - } - }, - "UserId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The ID of an Amazon Web Services account.
\nFor a referenced security group in another VPC, the account ID of the referenced\n security group is returned in the response. If the referenced security group is deleted,\n this value is not returned.
\n[EC2-Classic] Required when adding or removing rules that reference a security group in\n another VPC.
" - } - }, - "VpcId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The ID of the VPC for the referenced security group, if applicable.
" - } - }, - "VpcPeeringConnectionId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The ID of the VPC peering connection, if applicable.
" - } - } - }, - "traits": { - "smithy.api#documentation": "A relationship between a security group and a user.
" - } - }, - "com.amazonaws.securityhub#AwsEc2SecurityGroupUserIdGroupPairList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsEc2SecurityGroupUserIdGroupPair" - } - }, - "com.amazonaws.securityhub#AwsEc2SubnetDetails": { - "type": "structure", - "members": { - "AssignIpv6AddressOnCreation": { - "target": "com.amazonaws.securityhub#Boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "Whether to assign an IPV6 address to a network interface that is created in this subnet.
" - } - }, - "AvailabilityZone": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The Availability Zone for the subnet.
" - } - }, - "AvailabilityZoneId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The identifier of the Availability Zone for the subnet.
" - } - }, - "AvailableIpAddressCount": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "The number of available IPV4 addresses in the subnet. Does not include addresses for stopped instances.
" - } - }, - "CidrBlock": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The IPV4 CIDR block that is assigned to the subnet.
" - } - }, - "DefaultForAz": { - "target": "com.amazonaws.securityhub#Boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "Whether this subnet is the default subnet for the Availability Zone.
" - } - }, - "MapPublicIpOnLaunch": { - "target": "com.amazonaws.securityhub#Boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "Whether instances in this subnet receive a public IP address.
" - } - }, - "OwnerId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The identifier of the Amazon Web Services account that owns the subnet.
" - } - }, - "State": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The current state of the subnet. Valid values are available
or pending
.
The ARN of the subnet.
" - } - }, - "SubnetId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The identifier of the subnet.
" - } - }, - "VpcId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The identifier of the VPC that contains the subnet.
" - } - }, - "Ipv6CidrBlockAssociationSet": { - "target": "com.amazonaws.securityhub#Ipv6CidrBlockAssociationList", - "traits": { - "smithy.api#documentation": "The IPV6 CIDR blocks that are associated with the subnet.
" - } - } - }, - "traits": { - "smithy.api#documentation": "Contains information about a subnet in Amazon EC2.
" - } - }, - "com.amazonaws.securityhub#AwsEc2TransitGatewayDetails": { - "type": "structure", - "members": { - "Id": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The ID of the transit gateway.\n
" + "smithy.api#documentation": "The ID of the transit gateway.\n
" } }, "Description": { @@ -6377,7 +5105,7 @@ "Status": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "The attachment state of the volume. Valid values are as follows:
\n\n attaching
\n
\n attached
\n
\n busy
\n
\n detaching
\n
\n detached
\n
The attachment state of the volume. Valid values are as follows:
\n \t\n attaching
\n
\n attached
\n
\n busy
\n
\n detaching
\n
\n detached
\n
The volume state. Valid values are as follows:
\n\n available
\n
\n creating
\n
\n deleted
\n
\n deleting
\n
\n error
\n
\n in-use
\n
The volume state. Valid values are as follows:
\n \t\n available
\n
\n creating
\n
\n deleted
\n
\n deleting
\n
\n error
\n
\n in-use
\n
The current state of the service. Valid values are as follows:
\n\n Available
\n
\n Deleted
\n
\n Deleting
\n
\n Failed
\n
\n Pending
\n
The current state of the service. Valid values are as follows:
\n \t\n Available
\n
\n Deleted
\n
\n Deleting
\n
\n Failed
\n
\n Pending
\n
The current state of the VPN connection. Valid values are as follows:
\n\n available
\n
\n deleted
\n
\n deleting
\n
\n pending
\n
The current state of the VPN connection. Valid values are as follows:
\n \t\n available
\n
\n deleted
\n
\n deleting
\n
\n pending
\n
The architecture of the image. Valid values are as follows:
\n\n arm64
\n
\n i386
\n
\n x86_64
\n
The architecture of the image. Valid values are as follows:
\n \t\n arm64
\n
\n i386
\n
\n x86_64
\n
The dependency condition of the dependent container. Indicates the required status of the dependent container before the current container can start. Valid values are as follows:
\n\n COMPLETE
\n
\n HEALTHY
\n
\n SUCCESS
\n
\n START
\n
The dependency condition of the dependent container. Indicates the required status of the dependent container before the current container can start. Valid values are as follows:
\n \t\n COMPLETE
\n
\n HEALTHY
\n
\n SUCCESS
\n
\n START
\n
The Linux capabilities for the container that are added to the default configuration provided by Docker. Valid values are as follows:
\nValid values: \"ALL\"
| \"AUDIT_CONTROL\"
| \"AUDIT_WRITE\"
|\n \t\t\"BLOCK_SUSPEND\"
| \"CHOWN\"
| \"DAC_OVERRIDE\"
|\n \t\t\"DAC_READ_SEARCH\"
| \"FOWNER\"
| \"FSETID\"
|\n \t\t\"IPC_LOCK\"
| \"IPC_OWNER\"
| \"KILL\"
|\n \t\t\"LEASE\"
| \"LINUX_IMMUTABLE\"
| \"MAC_ADMIN\"
|\n \t\t\t\"MAC_OVERRIDE\"
| \"MKNOD\"
| \"NET_ADMIN\"
|\n \t\t\"NET_BIND_SERVICE\"
| \"NET_BROADCAST\"
| \"NET_RAW\"
|\n \t\t\"SETFCAP\"
| \"SETGID\"
| \"SETPCAP\"
|\n \t\t\"SETUID\"
| \"SYS_ADMIN\"
| \"SYS_BOOT\"
|\n \t\t\"SYS_CHROOT\"
| \"SYS_MODULE\"
| \"SYS_NICE\"
|\n \t\t\"SYS_PACCT\"
| \"SYS_PTRACE\"
| \"SYS_RAWIO\"
|\n \t\t\"SYS_RESOURCE\"
| \"SYS_TIME\"
| \"SYS_TTY_CONFIG\"
|\n \t\t\"SYSLOG\"
| \"WAKE_ALARM\"
\n
The Linux capabilities for the container that are added to the default configuration provided by Docker. Valid values are as follows:
\n \tValid values: \"ALL\"
| \"AUDIT_CONTROL\"
| \"AUDIT_WRITE\"
|\n \t\t\"BLOCK_SUSPEND\"
| \"CHOWN\"
| \"DAC_OVERRIDE\"
|\n \t\t\"DAC_READ_SEARCH\"
| \"FOWNER\"
| \"FSETID\"
|\n \t\t\"IPC_LOCK\"
| \"IPC_OWNER\"
| \"KILL\"
|\n \t\t\"LEASE\"
| \"LINUX_IMMUTABLE\"
| \"MAC_ADMIN\"
|\n \t\t\t\"MAC_OVERRIDE\"
| \"MKNOD\"
| \"NET_ADMIN\"
|\n \t\t\"NET_BIND_SERVICE\"
| \"NET_BROADCAST\"
| \"NET_RAW\"
|\n \t\t\"SETFCAP\"
| \"SETGID\"
| \"SETPCAP\"
|\n \t\t\"SETUID\"
| \"SYS_ADMIN\"
| \"SYS_BOOT\"
|\n \t\t\"SYS_CHROOT\"
| \"SYS_MODULE\"
| \"SYS_NICE\"
|\n \t\t\"SYS_PACCT\"
| \"SYS_PTRACE\"
| \"SYS_RAWIO\"
|\n \t\t\"SYS_RESOURCE\"
| \"SYS_TIME\"
| \"SYS_TTY_CONFIG\"
|\n \t\t\"SYSLOG\"
| \"WAKE_ALARM\"
\n
The Linux capabilities for the container that are dropped from the default configuration provided by Docker.
\nValid values: \"ALL\"
| \"AUDIT_CONTROL\"
| \"AUDIT_WRITE\"
|\n \t\t\"BLOCK_SUSPEND\"
| \"CHOWN\"
| \"DAC_OVERRIDE\"
|\n \t\t\"DAC_READ_SEARCH\"
| \"FOWNER\"
| \"FSETID\"
|\n \t\t\"IPC_LOCK\"
| \"IPC_OWNER\"
| \"KILL\"
|\n \t\t\"LEASE\"
| \"LINUX_IMMUTABLE\"
| \"MAC_ADMIN\"
|\n \t\t\t\"MAC_OVERRIDE\"
| \"MKNOD\"
| \"NET_ADMIN\"
|\n \t\t\"NET_BIND_SERVICE\"
| \"NET_BROADCAST\"
| \"NET_RAW\"
|\n \t\t\"SETFCAP\"
| \"SETGID\"
| \"SETPCAP\"
|\n \t\t\"SETUID\"
| \"SYS_ADMIN\"
| \"SYS_BOOT\"
|\n \t\t\"SYS_CHROOT\"
| \"SYS_MODULE\"
| \"SYS_NICE\"
|\n \t\t\"SYS_PACCT\"
| \"SYS_PTRACE\"
| \"SYS_RAWIO\"
|\n \t\t\"SYS_RESOURCE\"
| \"SYS_TIME\"
| \"SYS_TTY_CONFIG\"
|\n \t\t\"SYSLOG\"
| \"WAKE_ALARM\"
\n
The Linux capabilities for the container that are dropped from the default configuration provided by Docker.
\n \tValid values: \"ALL\"
| \"AUDIT_CONTROL\"
| \"AUDIT_WRITE\"
|\n \t\t\"BLOCK_SUSPEND\"
| \"CHOWN\"
| \"DAC_OVERRIDE\"
|\n \t\t\"DAC_READ_SEARCH\"
| \"FOWNER\"
| \"FSETID\"
|\n \t\t\"IPC_LOCK\"
| \"IPC_OWNER\"
| \"KILL\"
|\n \t\t\"LEASE\"
| \"LINUX_IMMUTABLE\"
| \"MAC_ADMIN\"
|\n \t\t\t\"MAC_OVERRIDE\"
| \"MKNOD\"
| \"NET_ADMIN\"
|\n \t\t\"NET_BIND_SERVICE\"
| \"NET_BROADCAST\"
| \"NET_RAW\"
|\n \t\t\"SETFCAP\"
| \"SETGID\"
| \"SETPCAP\"
|\n \t\t\"SETUID\"
| \"SYS_ADMIN\"
| \"SYS_BOOT\"
|\n \t\t\"SYS_CHROOT\"
| \"SYS_MODULE\"
| \"SYS_NICE\"
|\n \t\t\"SYS_PACCT\"
| \"SYS_PTRACE\"
| \"SYS_RAWIO\"
|\n \t\t\"SYS_RESOURCE\"
| \"SYS_TIME\"
| \"SYS_TTY_CONFIG\"
|\n \t\t\"SYSLOG\"
| \"WAKE_ALARM\"
\n
The list of tmpfs volume mount options.
\nValid values: \"defaults\"
| \"ro\"
| \"rw\"
| \"suid\"
|\n \t\t\"nosuid\"
| \"dev\"
| \"nodev\"
| \"exec\"
|\n \t\t\"noexec\"
| \"sync\"
| \"async\"
| \"dirsync\"
\n \t\t| \"remount\"
| \"mand\"
| \"nomand\"
| \"atime\"
\n \t\t| \"noatime\"
| \"diratime\"
| \"nodiratime\"
|\n \t\t\"bind\"
| \"rbind\"
| \"unbindable\"
|\n \t\t\"runbindable\"
| \"private\"
| \"rprivate\"
|\n \t\t\"shared\"
| \"rshared\"
| \"slave\"
|\n \t\t\"rslave\"
| \"relatime\"
| \"norelatime\"
|\n \t\t\"strictatime\"
| \"nostrictatime\"
| \"mode\"
|\n \t\t\"uid\"
| \"gid\"
| \"nr_inodes\"
|\n \t\t\t\"nr_blocks\"
| \"mpol\"
\n
The list of tmpfs volume mount options.
\n \tValid values: \"defaults\"
| \"ro\"
| \"rw\"
| \"suid\"
|\n \t\t\"nosuid\"
| \"dev\"
| \"nodev\"
| \"exec\"
|\n \t\t\"noexec\"
| \"sync\"
| \"async\"
| \"dirsync\"
\n \t\t| \"remount\"
| \"mand\"
| \"nomand\"
| \"atime\"
\n \t\t| \"noatime\"
| \"diratime\"
| \"nodiratime\"
|\n \t\t\"bind\"
| \"rbind\"
| \"unbindable\"
|\n \t\t\"runbindable\"
| \"private\"
| \"rprivate\"
|\n \t\t\"shared\"
| \"rshared\"
| \"slave\"
|\n \t\t\"rslave\"
| \"relatime\"
| \"norelatime\"
|\n \t\t\"strictatime\"
| \"nostrictatime\"
| \"mode\"
|\n \t\t\"uid\"
| \"gid\"
| \"nr_inodes\"
|\n \t\t\t\"nr_blocks\"
| \"mpol\"
\n
The log driver to use for the container.
\nValid values on Fargate are as follows:
\n\n awsfirelens
\n
\n awslogs
\n
\n splunk
\n
Valid values on Amazon EC2 are as follows:
\n\n awsfirelens
\n
\n awslogs
\n
\n fluentd
\n
\n gelf
\n
\n journald
\n
\n json-file
\n
\n logentries
\n
\n splunk
\n
\n syslog
\n
The log driver to use for the container.
\n \tValid values on Fargate are as follows:
\n \t\n awsfirelens
\n
\n awslogs
\n
\n splunk
\n
Valid values on Amazon EC2 are as follows:
\n \t\n awsfirelens
\n
\n awslogs
\n
\n fluentd
\n
\n gelf
\n
\n journald
\n
\n json-file
\n
\n logentries
\n
\n splunk
\n
\n syslog
\n
The type of the ulimit. Valid values are as follows:
\n\n core
\n
\n cpu
\n
\n data
\n
\n fsize
\n
\n locks
\n
\n memlock
\n
\n msgqueue
\n
\n nice
\n
\n nofile
\n
\n nproc
\n
\n rss
\n
\n rtprio
\n
\n rttime
\n
\n sigpending
\n
\n stack
\n
The type of the ulimit. Valid values are as follows:
\n \t\n core
\n
\n cpu
\n
\n data
\n
\n fsize
\n
\n locks
\n
\n memlock
\n
\n msgqueue
\n
\n nice
\n
\n nofile
\n
\n nproc
\n
\n rss
\n
\n rtprio
\n
\n rttime
\n
\n sigpending
\n
\n stack
\n
The number of CPU units used by the task.Valid values are as follows:
\n\n 256 (.25 vCPU)
\n
\n 512 (.5 vCPU)
\n
\n 1024 (1 vCPU)
\n
\n 2048 (2 vCPU)
\n
\n 4096 (4 vCPU)
\n
The number of CPU units used by the task.Valid values are as follows:
\n \t\n 256 (.25 vCPU)
\n
\n 512 (.5 vCPU)
\n
\n 1024 (1 vCPU)
\n
\n 2048 (2 vCPU)
\n
\n 4096 (4 vCPU)
\n
The inter-process communication (IPC) resource namespace to use for the containers in the task. Valid values are as follows:
\n\n host
\n
\n none
\n
\n task
\n
The inter-process communication (IPC) resource namespace to use for the containers in the task. Valid values are as follows:
\n \t\n host
\n
\n none
\n
\n task
\n
The Docker networking mode to use for the containers in the task. Valid values are as follows:
\n\n awsvpc
\n
\n bridge
\n
\n host
\n
\n none
\n
The Docker networking mode to use for the containers in the task. Valid values are as follows:
\n \t\n awsvpc
\n
\n bridge
\n
\n host
\n
\n none
\n
The status of the cluster. Valid values are as follows:
\n\n ACTIVE
\n
\n CREATING
\n
\n DELETING
\n
\n FAILED
\n
\n PENDING
\n
\n UPDATING
\n
The status of the cluster. Valid values are as follows:
\n \t\n ACTIVE
\n
\n CREATING
\n
\n DELETING
\n
\n FAILED
\n
\n PENDING
\n
\n UPDATING
\n
A list of logging types. Valid values are as follows:
\n\n api
\n
\n audit
\n
\n authenticator
\n
\n controllerManager
\n
\n scheduler
\n
A list of logging types. Valid values are as follows:
\n \t\n api
\n
\n audit
\n
\n authenticator
\n
\n controllerManager
\n
\n scheduler
\n
The current operational status of the environment. Valid values are as follows:
\n\n Aborting
\n
\n Launching
\n
\n LinkingFrom
\n
\n LinkingTo
\n
\n Ready
\n
\n Terminated
\n
\n Terminating
\n
\n Updating
\n
The current operational status of the environment. Valid values are as follows:
\n \t\n Aborting
\n
\n Launching
\n
\n LinkingFrom
\n
\n LinkingTo
\n
\n Ready
\n
\n Terminated
\n
\n Terminating
\n
\n Updating
\n
The hardware configuration of the computer that hosts the dedicated master node. A sample value is m3.medium.elasticsearch
. If this attribute is specified, then DedicatedMasterEnabled
must be true
.
For a list of valid values, see Supported instance types in Amazon OpenSearch Service in the Amazon OpenSearch Service Developer Guide.
" + "smithy.api#documentation": "The hardware configuration of the computer that hosts the dedicated master node. A sample value is m3.medium.elasticsearch
. If this attribute is specified, then DedicatedMasterEnabled
must be true
.
For a list of valid values, see Supported instance types in Amazon OpenSearch Service in the Amazon OpenSearch Service Developer Guide.
" } }, "InstanceCount": { @@ -9634,7 +8362,7 @@ "InstanceType": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "The instance type for your data nodes. For example, m3.medium.elasticsearch
.
For a list of valid values, see Supported instance types in Amazon OpenSearch Service in the Amazon OpenSearch Service Developer Guide.
" + "smithy.api#documentation": "The instance type for your data nodes. For example, m3.medium.elasticsearch
.
For a list of valid values, see Supported instance types in Amazon OpenSearch Service in the Amazon OpenSearch Service Developer Guide.
" } }, "ZoneAwarenessConfig": { @@ -9794,7 +8522,7 @@ "UpdateStatus": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "The status of the service software update. Valid values are as follows:
\n\n COMPLETED
\n
\n ELIGIBLE
\n
\n IN_PROGRESS
\n
\n NOT_ELIGIBLE
\n
\n PENDING_UPDATE
\n
The status of the service software update. Valid values are as follows:
\n \t\n COMPLETED
\n
\n ELIGIBLE
\n
\n IN_PROGRESS
\n
\n NOT_ELIGIBLE
\n
\n PENDING_UPDATE
\n
The state of the KMS key. Valid values are as follows:
\n\n Disabled
\n
\n Enabled
\n
\n PendingDeletion
\n
\n PendingImport
\n
\n Unavailable
\n
The state of the KMS key. Valid values are as follows:
\n \t\n Disabled
\n
\n Enabled
\n
\n PendingDeletion
\n
\n PendingImport
\n
\n Unavailable
\n
The instance type for your data nodes.
\nFor a list of valid values, see Supported instance types in Amazon OpenSearch Service in the Amazon OpenSearch Service Developer Guide.
" + "smithy.api#documentation": "The instance type for your data nodes.
\n \tFor a list of valid values, see Supported instance types in Amazon OpenSearch Service in the Amazon OpenSearch Service Developer Guide.
" } }, "WarmType": { @@ -12149,7 +10877,7 @@ "UpdateStatus": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "The status of the service software update. Valid values are as follows:
\n\n COMPLETED
\n
\n ELIGIBLE
\n
\n IN_PROGRESS
\n
\n NOT_ELIGIBLE
\n
\n PENDING_UPDATE
\n
The status of the service software update. Valid values are as follows:
\n \t\n COMPLETED
\n
\n ELIGIBLE
\n
\n IN_PROGRESS
\n
\n NOT_ELIGIBLE
\n
\n PENDING_UPDATE
\n
The status of the database activity stream. Valid values are as follows:
\n\n started
\n
\n starting
\n
\n stopped
\n
\n stopping
\n
The status of the database activity stream. Valid values are as follows:
\n \t\n started
\n
\n starting
\n
\n stopped
\n
\n stopping
\n
The rules to redirect the request if the condition in Condition
is\n met.
Indicates when the object was last modified.
\nUses the date-time
format specified in RFC 3339 section 5.6, Internet\n Date/Time Format. The value cannot contain spaces. For example,\n 2020-03-22T13:22:13.933Z
.
The opaque identifier assigned by a web server to a specific version of a resource found\n at a URL.
" - } - }, - "VersionId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The version of the object.
" - } - }, - "ContentType": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "A standard MIME type describing the format of the object data.
" - } - }, - "ServerSideEncryption": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "If the object is stored using server-side encryption, the value of the server-side\n encryption algorithm used when storing this object in Amazon S3.
" - } - }, - "SSEKMSKeyId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The identifier of the KMS symmetric customer managed key that was used for the object.
" - } - } - }, - "traits": { - "smithy.api#documentation": "Details about an Amazon S3 object.
" - } - }, - "com.amazonaws.securityhub#AwsSageMakerNotebookInstanceDetails": { - "type": "structure", - "members": { - "AcceleratorTypes": { - "target": "com.amazonaws.securityhub#NonEmptyStringList", - "traits": { - "smithy.api#documentation": "\n A list of Amazon Elastic Inference instance types to associate with the notebook instance. Currently, only one instance type can be associated with a notebook instance. \n
" - } - }, - "AdditionalCodeRepositories": { - "target": "com.amazonaws.securityhub#NonEmptyStringList", - "traits": { - "smithy.api#documentation": "\n An array of up to three Git repositories associated with the notebook instance. These can be either the names of Git repositories stored as resources in your account, or the URL of Git repositories in AWS CodeCommit or in any other Git repository. \n These repositories are cloned at the same level as the default repository of your notebook instance. For more information, see Associating Git repositories with SageMaker notebook instances in the Amazon SageMaker Developer Guide.\n
" - } - }, - "DefaultCodeRepository": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The Git repository associated with the notebook instance as its default code repository. This can be either the name of a Git repository stored as a resource in your account, or the URL of a Git repository in AWS CodeCommit or in any other Git repository. \n When you open a notebook instance, it opens in the directory that contains this repository. For more information, see Associating Git repositories with SageMaker notebook instances in the Amazon SageMaker Developer Guide.\n
" - } - }, - "DirectInternetAccess": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n Sets whether SageMaker provides internet access to the notebook instance. If you set this to Disabled
    , this notebook instance is able to access resources only in your VPC, and is not able to connect to SageMaker training and endpoint services unless you configure a Network Address Translation (NAT) Gateway in your VPC.\n

    
\n If status of the instance is Failed
, the reason it failed.\n
\n Information on the IMDS configuration of the notebook instance.\n
" - } - }, - "InstanceType": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The type of machine learning (ML) compute instance to launch for the notebook instance.\n
" - } - }, - "KmsKeyId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The Amazon Resource Name (ARN) of an Key Management Service (KMS) key that SageMaker uses to encrypt data on the storage volume attached to your notebook instance. The KMS key you provide must be enabled. For information, see \n Enabling and disabling keys in the Key Management Service Developer Guide.\n
" - } - }, - "NetworkInterfaceId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The network interface ID that SageMaker created when the instance was created.\n
" - } - }, - "NotebookInstanceArn": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The Amazon Resource Name (ARN) of the notebook instance.\n
" - } - }, - "NotebookInstanceLifecycleConfigName": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The name of a notebook instance lifecycle configuration.\n
" - } - }, - "NotebookInstanceName": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The name of the new notebook instance.\n
" - } - }, - "NotebookInstanceStatus": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The status of the notebook instance.\n
" - } - }, - "PlatformIdentifier": { + "smithy.api#documentation": "The rules to redirect the request if the condition in Condition
is\n met.
\n The platform identifier of the notebook instance runtime environment.\n
" + "smithy.api#documentation": "Indicates when the object was last modified.
\nUses the date-time
format specified in RFC 3339 section 5.6, Internet\n Date/Time Format. The value cannot contain spaces. For example,\n 2020-03-22T13:22:13.933Z
.
\n The Amazon Resource Name (ARN) of the IAM role associated with the instance.\n
" + "smithy.api#documentation": "The opaque identifier assigned by a web server to a specific version of a resource found\n at a URL.
" } }, - "RootAccess": { + "VersionId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n Whether root access is enabled or disabled for users of the notebook instance. \n
" - } - }, - "SecurityGroups": { - "target": "com.amazonaws.securityhub#NonEmptyStringList", - "traits": { - "smithy.api#documentation": "\n The VPC security group IDs.\n
" + "smithy.api#documentation": "The version of the object.
" } }, - "SubnetId": { + "ContentType": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The ID of the VPC subnet to which you have a connectivity from your ML compute instance.\n
" + "smithy.api#documentation": "A standard MIME type describing the format of the object data.
" } }, - "Url": { + "ServerSideEncryption": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The URL that you use to connect to the Jupyter notebook that is running in your notebook instance.\n
" + "smithy.api#documentation": "If the object is stored using server-side encryption, the value of the server-side\n encryption algorithm used when storing this object in Amazon S3.
" } }, - "VolumeSizeInGB": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The size, in GB, of the ML storage volume to attach to the notebook instance.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Provides details about an Amazon SageMaker notebook instance.\n
" - } - }, - "com.amazonaws.securityhub#AwsSageMakerNotebookInstanceMetadataServiceConfigurationDetails": { - "type": "structure", - "members": { - "MinimumInstanceMetadataServiceVersion": { + "SSEKMSKeyId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n Indicates the minimum IMDS version that the notebook instance supports.\n
" + "smithy.api#documentation": "The identifier of the KMS symmetric customer managed key that was used for the object.
" } } }, "traits": { - "smithy.api#documentation": "\n Information on the instance metadata service (IMDS) configuration of the notebook instance.\n
" + "smithy.api#documentation": "Details about an Amazon S3 object.
" } }, "com.amazonaws.securityhub#AwsSecretsManagerSecretDetails": { @@ -16714,552 +15299,291 @@ } }, "NonCompliantHighCount": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "For the patches that are noncompliant, the number that have a severity of\n HIGH
.
For the patches that are compliant, the number that have a severity of\n LOW
.
The type of resource for which the compliance was determined. For\n AwsSsmPatchCompliance
, ComplianceType
is Patch
.
The identifier of the patch baseline. The patch baseline lists the patches that are\n approved for installation.
" - } - }, - "OverallSeverity": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The highest severity for the patches. Valid values are as follows:
\n\n CRITICAL
\n
\n HIGH
\n
\n MEDIUM
\n
\n LOW
\n
\n INFORMATIONAL
\n
\n UNSPECIFIED
\n
For the patches that are noncompliant, the number that have a severity of\n MEDIUM
.
For the patches that are noncompliant, the number that have a severity of\n UNSPECIFIED
.
The identifier of the patch group for which compliance was determined. A patch group\n uses tags to group EC2 instances that should have the same patch compliance.
" - } - } - }, - "traits": { - "smithy.api#documentation": "Provides the details about the compliance status for a patch.
" - } - }, - "com.amazonaws.securityhub#AwsSsmPatch": { - "type": "structure", - "members": { - "ComplianceSummary": { - "target": "com.amazonaws.securityhub#AwsSsmComplianceSummary", - "traits": { - "smithy.api#documentation": "The compliance status details for the patch.
" - } - } - }, - "traits": { - "smithy.api#documentation": "Provides details about the compliance for a patch.
" - } - }, - "com.amazonaws.securityhub#AwsSsmPatchComplianceDetails": { - "type": "structure", - "members": { - "Patch": { - "target": "com.amazonaws.securityhub#AwsSsmPatch", - "traits": { - "smithy.api#documentation": "Information about the status of a patch.
" - } - } - }, - "traits": { - "smithy.api#documentation": "Provides information about the state of a patch on an instance based on the patch\n baseline that was used to patch the instance.
" - } - }, - "com.amazonaws.securityhub#AwsWafRateBasedRuleDetails": { - "type": "structure", - "members": { - "MetricName": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The name of the metrics for the rate-based rule.
" - } - }, - "Name": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The name of the rate-based rule.
" - } - }, - "RateKey": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The field that WAF uses to determine whether requests are likely arriving from single source and are subject to rate monitoring.
" - } - }, - "RateLimit": { - "target": "com.amazonaws.securityhub#Long", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "The maximum number of requests that have an identical value for the field specified in RateKey
that are allowed within a five-minute period. If the number of requests exceeds RateLimit
and the other predicates specified in the rule are met, WAF triggers the action for the rule.
The unique identifier for the rate-based rule.
" - } - }, - "MatchPredicates": { - "target": "com.amazonaws.securityhub#AwsWafRateBasedRuleMatchPredicateList", - "traits": { - "smithy.api#documentation": "The predicates to include in the rate-based rule.
" - } - } - }, - "traits": { - "smithy.api#documentation": "Details about a rate-based rule for global resources. A rate-based rule provides settings to indicate when to allow, block, or count a request. Rate-based rules include the number of requests that arrive over a specified period of time.
" - } - }, - "com.amazonaws.securityhub#AwsWafRateBasedRuleMatchPredicate": { - "type": "structure", - "members": { - "DataId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The unique identifier for the predicate.
" - } - }, - "Negated": { - "target": "com.amazonaws.securityhub#Boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "If set to true
, then the rule actions are performed on requests that match the predicate settings.
If set to false
, then the rule actions are performed on all requests except those that match the predicate settings.\n
The type of predicate. Valid values are as follows:
\n\n ByteMatch
\n
\n GeoMatch
\n
\n IPMatch
\n
\n RegexMatch
\n
\n SizeConstraint
\n
\n SqlInjectionMatch
\n
\n XssMatch
\n
A match predicate. A predicate might look for characteristics such as specific IP addresses, geographic locations, or sizes.
" - } - }, - "com.amazonaws.securityhub#AwsWafRateBasedRuleMatchPredicateList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsWafRateBasedRuleMatchPredicate" - } - }, - "com.amazonaws.securityhub#AwsWafRegionalRateBasedRuleDetails": { - "type": "structure", - "members": { - "MetricName": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The name of the metrics for the rate-based rule.
" - } - }, - "Name": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The name of the rate-based rule.
" - } - }, - "RateKey": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The field that WAF uses to determine whether requests are likely arriving from single source and are subject to rate monitoring.
" - } - }, - "RateLimit": { - "target": "com.amazonaws.securityhub#Long", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "The maximum number of requests that have an identical value for the field specified in RateKey
that are allowed within a five-minute period. If the number of requests exceeds RateLimit
and the other predicates specified in the rule are met, WAF triggers the action for the rule.
The unique identifier for the rate-based rule.
" - } - }, - "MatchPredicates": { - "target": "com.amazonaws.securityhub#AwsWafRegionalRateBasedRuleMatchPredicateList", - "traits": { - "smithy.api#documentation": "The predicates to include in the rate-based rule.
" - } - } - }, - "traits": { - "smithy.api#documentation": "contains details about a rate-based rule for Regional resources. A rate-based rule provides settings to indicate when to allow, block, or count a request. Rate-based rules include the number of requests that arrive over a specified period of time.
" - } - }, - "com.amazonaws.securityhub#AwsWafRegionalRateBasedRuleMatchPredicate": { - "type": "structure", - "members": { - "DataId": { - "target": "com.amazonaws.securityhub#NonEmptyString", + "target": "com.amazonaws.securityhub#Integer", "traits": { - "smithy.api#documentation": "The unique identifier for the predicate.
" + "smithy.api#default": 0, + "smithy.api#documentation": "For the patches that are noncompliant, the number that have a severity of\n HIGH
.
If set to true
, then the rule actions are performed on requests that match the predicate settings.
If set to false
, then the rule actions are performed on all requests except those that match the predicate settings.
For the patches that are compliant, the number that have a severity of\n LOW
.
The type of predicate. Valid values are as follows:
\n\n ByteMatch
\n
\n GeoMatch
\n
\n IPMatch
\n
\n RegexMatch
\n
\n SizeConstraint
\n
\n SqlInjectionMatch
\n
\n XssMatch
\n
The type of resource for which the compliance was determined. For\n AwsSsmPatchCompliance
, ComplianceType
is Patch
.
Details for a match predicate. A predicate might look for characteristics such as specific IP addresses, geographic locations, or sizes.
" - } - }, - "com.amazonaws.securityhub#AwsWafRegionalRateBasedRuleMatchPredicateList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsWafRegionalRateBasedRuleMatchPredicate" - } - }, - "com.amazonaws.securityhub#AwsWafRegionalRuleDetails": { - "type": "structure", - "members": { - "MetricName": { + }, + "PatchBaselineId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "A name for the metrics for the rule. \n
" + "smithy.api#documentation": "The identifier of the patch baseline. The patch baseline lists the patches that are\n approved for installation.
" } }, - "Name": { + "OverallSeverity": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "A descriptive name for the rule.\n
" + "smithy.api#documentation": "The highest severity for the patches. Valid values are as follows:
\n\n CRITICAL
\n
\n HIGH
\n
\n MEDIUM
\n
\n LOW
\n
\n INFORMATIONAL
\n
\n UNSPECIFIED
\n
Specifies the ByteMatchSet
, IPSet
,\n SqlInjectionMatchSet
, XssMatchSet
, RegexMatchSet
,\n GeoMatchSet
, and SizeConstraintSet
objects that you want to\n add to a rule and, for each object, indicates whether you want to negate the settings.
For the patches that are noncompliant, the number that have a severity of\n MEDIUM
.
For the patches that are noncompliant, the number that have a severity of\n UNSPECIFIED
.
The ID of the rule.\n
" + "smithy.api#documentation": "The identifier of the patch group for which compliance was determined. A patch group\n uses tags to group EC2 instances that should have the same patch compliance.
" } } }, "traits": { - "smithy.api#documentation": "Provides information about an WAF Regional rule. This rule identifies the web requests that you want to allow, block, or count.
" + "smithy.api#documentation": "Provides the details about the compliance status for a patch.
" } }, - "com.amazonaws.securityhub#AwsWafRegionalRuleGroupDetails": { + "com.amazonaws.securityhub#AwsSsmPatch": { "type": "structure", "members": { - "MetricName": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "A name for the metrics for this rule group.\n
" - } - }, - "Name": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The descriptive name of the rule group.\n
" - } - }, - "RuleGroupId": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "The ID of the rule group.\n
" - } - }, - "Rules": { - "target": "com.amazonaws.securityhub#AwsWafRegionalRuleGroupRulesList", + "ComplianceSummary": { + "target": "com.amazonaws.securityhub#AwsSsmComplianceSummary", "traits": { - "smithy.api#documentation": "Provides information about the rule statements used to identify the web requests that you want to allow, block, or \ncount. \n
" + "smithy.api#documentation": "The compliance status details for the patch.
" } } }, "traits": { - "smithy.api#documentation": "Provides information about an WAF Regional rule group. The rule group is a collection of rules for inspecting and controlling web\n requests.
" + "smithy.api#documentation": "Provides details about the compliance for a patch.
" } }, - "com.amazonaws.securityhub#AwsWafRegionalRuleGroupRulesActionDetails": { + "com.amazonaws.securityhub#AwsSsmPatchComplianceDetails": { "type": "structure", "members": { - "Type": { - "target": "com.amazonaws.securityhub#NonEmptyString", + "Patch": { + "target": "com.amazonaws.securityhub#AwsSsmPatch", "traits": { - "smithy.api#documentation": "Specifies the ByteMatchSet
, IPSet
, SqlInjectionMatchSet
, XssMatchSet
, RegexMatchSet
, \nGeoMatchSet
, and SizeConstraintSet
objects that you want to add to a rule and, for each object, indicates whether you want to negate the settings.
Information about the status of a patch.
" } } }, "traits": { - "smithy.api#documentation": "Describes the action that WAF should take on a web request when it matches the criteria defined in the rule.\n
" + "smithy.api#documentation": "Provides information about the state of a patch on an instance based on the patch\n baseline that was used to patch the instance.
" } }, - "com.amazonaws.securityhub#AwsWafRegionalRuleGroupRulesDetails": { + "com.amazonaws.securityhub#AwsWafRateBasedRuleDetails": { "type": "structure", "members": { - "Action": { - "target": "com.amazonaws.securityhub#AwsWafRegionalRuleGroupRulesActionDetails", + "MetricName": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "The action that WAF should take on a web request when it matches the criteria defined in the rule.
" + "smithy.api#documentation": "The name of the metrics for the rate-based rule.
" } }, - "Priority": { - "target": "com.amazonaws.securityhub#Integer", + "Name": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "The name of the rate-based rule.
" + } + }, + "RateKey": { + "target": "com.amazonaws.securityhub#NonEmptyString", + "traits": { + "smithy.api#documentation": "The field that WAF uses to determine whether requests are likely arriving from single source and are subject to rate monitoring.
" + } + }, + "RateLimit": { + "target": "com.amazonaws.securityhub#Long", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "If you define more than one rule in a web ACL, WAF evaluates each request against the rules in\n order based on the value of Priority
.
The maximum number of requests that have an identical value for the field specified in RateKey
that are allowed within a five-minute period. If the number of requests exceeds RateLimit
and the other predicates specified in the rule are met, WAF triggers the action for the rule.
The ID for a rule.\n
" + "smithy.api#documentation": "The unique identifier for the rate-based rule.
" } }, - "Type": { - "target": "com.amazonaws.securityhub#NonEmptyString", + "MatchPredicates": { + "target": "com.amazonaws.securityhub#AwsWafRateBasedRuleMatchPredicateList", "traits": { - "smithy.api#documentation": "The type of rule in the rule group.\n
" + "smithy.api#documentation": "The predicates to include in the rate-based rule.
" } } }, "traits": { - "smithy.api#documentation": "Provides information about the rules attached to a rule group\n
" - } - }, - "com.amazonaws.securityhub#AwsWafRegionalRuleGroupRulesList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsWafRegionalRuleGroupRulesDetails" - } - }, - "com.amazonaws.securityhub#AwsWafRegionalRulePredicateList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsWafRegionalRulePredicateListDetails" + "smithy.api#documentation": "Details about a rate-based rule for global resources. A rate-based rule provides settings to indicate when to allow, block, or count a request. Rate-based rules include the number of requests that arrive over a specified period of time.
" } }, - "com.amazonaws.securityhub#AwsWafRegionalRulePredicateListDetails": { + "com.amazonaws.securityhub#AwsWafRateBasedRuleMatchPredicate": { "type": "structure", "members": { "DataId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "A unique identifier for a predicate in a rule, such as ByteMatchSetId
or IPSetId
.\n
The unique identifier for the predicate.
" } }, "Negated": { "target": "com.amazonaws.securityhub#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "Specifies if you want WAF to allow, block, or count requests based on the settings in the \n ByteMatchSet
, IPSet
, SqlInjectionMatchSet
, XssMatchSet
, \n RegexMatchSet
, GeoMatchSet
, or SizeConstraintSet
.\n
If set to true
, then the rule actions are performed on requests that match the predicate settings.
If set to false
, then the rule actions are performed on all requests except those that match the predicate settings.\n
The type of predicate in a rule, such as ByteMatch
or IPSet
.\n
The type of predicate. Valid values are as follows:
\n\n ByteMatch
\n
\n GeoMatch
\n
\n IPMatch
\n
\n RegexMatch
\n
\n SizeConstraint
\n
\n SqlInjectionMatch
\n
\n XssMatch
\n
Provides details about the ByteMatchSet
, IPSet
, SqlInjectionMatchSet
, XssMatchSet
, \n RegexMatchSet
, GeoMatchSet
, and SizeConstraintSet
objects that you want to add to a rule and, for each object, indicates whether you want to negate the settings.\n
A match predicate. A predicate might look for characteristics such as specific IP addresses, geographic locations, or sizes.
" } }, - "com.amazonaws.securityhub#AwsWafRegionalWebAclDetails": { + "com.amazonaws.securityhub#AwsWafRateBasedRuleMatchPredicateList": { + "type": "list", + "member": { + "target": "com.amazonaws.securityhub#AwsWafRateBasedRuleMatchPredicate" + } + }, + "com.amazonaws.securityhub#AwsWafRegionalRateBasedRuleDetails": { "type": "structure", "members": { - "DefaultAction": { + "MetricName": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "The action to perform if none of the rules contained in the web ACL match. \n
" + "smithy.api#documentation": "The name of the metrics for the rate-based rule.
" } }, - "MetricName": { + "Name": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "A name for the metrics for this web ACL. \n
" + "smithy.api#documentation": "The name of the rate-based rule.
" } }, - "Name": { + "RateKey": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "A descriptive name for the web ACL. \n
" + "smithy.api#documentation": "The field that WAF uses to determine whether requests are likely arriving from single source and are subject to rate monitoring.
" } }, - "RulesList": { - "target": "com.amazonaws.securityhub#AwsWafRegionalWebAclRulesList", + "RateLimit": { + "target": "com.amazonaws.securityhub#Long", "traits": { - "smithy.api#documentation": "An array that contains the action for each rule in a web ACL, the priority of the rule, and the ID of \nthe rule.\n
" + "smithy.api#default": 0, + "smithy.api#documentation": "The maximum number of requests that have an identical value for the field specified in RateKey
that are allowed within a five-minute period. If the number of requests exceeds RateLimit
and the other predicates specified in the rule are met, WAF triggers the action for the rule.
The ID of the web ACL.\n
" + "smithy.api#documentation": "The unique identifier for the rate-based rule.
" } - } - }, - "traits": { - "smithy.api#documentation": "Provides information about the web access control list (web ACL). The web ACL contains the rules that identify the requests that you\n want to allow, block, or count.
" - } - }, - "com.amazonaws.securityhub#AwsWafRegionalWebAclRulesList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsWafRegionalWebAclRulesListDetails" - } - }, - "com.amazonaws.securityhub#AwsWafRegionalWebAclRulesListActionDetails": { - "type": "structure", - "members": { - "Type": { - "target": "com.amazonaws.securityhub#NonEmptyString", + }, + "MatchPredicates": { + "target": "com.amazonaws.securityhub#AwsWafRegionalRateBasedRuleMatchPredicateList", "traits": { - "smithy.api#documentation": "For actions that are associated with a rule, the action that WAF takes when a web request matches all conditions in a rule.\n
" + "smithy.api#documentation": "The predicates to include in the rate-based rule.
" } } }, "traits": { - "smithy.api#documentation": "The action that WAF takes when a web request matches all conditions in the\n rule, such as allow, block, or count the request.
" + "smithy.api#documentation": "contains details about a rate-based rule for Regional resources. A rate-based rule provides settings to indicate when to allow, block, or count a request. Rate-based rules include the number of requests that arrive over a specified period of time.
" } }, - "com.amazonaws.securityhub#AwsWafRegionalWebAclRulesListDetails": { + "com.amazonaws.securityhub#AwsWafRegionalRateBasedRuleMatchPredicate": { "type": "structure", "members": { - "Action": { - "target": "com.amazonaws.securityhub#AwsWafRegionalWebAclRulesListActionDetails", - "traits": { - "smithy.api#documentation": "The action that WAF takes when a web request matches all conditions in the rule, such as allow, \n block, or count the request.\n
" - } - }, - "OverrideAction": { - "target": "com.amazonaws.securityhub#AwsWafRegionalWebAclRulesListOverrideActionDetails", - "traits": { - "smithy.api#documentation": "Overrides the rule evaluation result in the rule group. \n
" - } - }, - "Priority": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "The order in which WAF evaluates the rules in a web ACL.\n
" - } - }, - "RuleId": { + "DataId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "The ID of an WAF Regional rule to associate with a web ACL.
" + "smithy.api#documentation": "The unique identifier for the predicate.
" } }, - "Type": { - "target": "com.amazonaws.securityhub#NonEmptyString", + "Negated": { + "target": "com.amazonaws.securityhub#Boolean", "traits": { - "smithy.api#documentation": "For actions that are associated with a rule, the action that WAF takes when a web \nrequest matches all conditions in a rule.\n
" + "smithy.api#default": false, + "smithy.api#documentation": "If set to true
, then the rule actions are performed on requests that match the predicate settings.
If set to false
, then the rule actions are performed on all requests except those that match the predicate settings.
A combination of ByteMatchSet
, IPSet
, and/or SqlInjectionMatchSet
\nobjects that identify the web requests that you want to allow, block, or count. \n
Overrides the rule evaluation result in the rule group.\n
" + "smithy.api#documentation": "The type of predicate. Valid values are as follows:
\n\n ByteMatch
\n
\n GeoMatch
\n
\n IPMatch
\n
\n RegexMatch
\n
\n SizeConstraint
\n
\n SqlInjectionMatch
\n
\n XssMatch
\n
Provides details about the action to use in the place of the action that results from the rule group \nevaluation. \n
" + "smithy.api#documentation": "Details for a match predicate. A predicate might look for characteristics such as specific IP addresses, geographic locations, or sizes.
" } }, - "com.amazonaws.securityhub#AwsWafRuleDetails": { + "com.amazonaws.securityhub#AwsWafRegionalRateBasedRuleMatchPredicateList": { + "type": "list", + "member": { + "target": "com.amazonaws.securityhub#AwsWafRegionalRateBasedRuleMatchPredicate" + } + }, + "com.amazonaws.securityhub#AwsWafRegionalRuleDetails": { "type": "structure", "members": { "MetricName": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "The name of the metrics for this rule.\n
" + "smithy.api#documentation": "A name for the metrics for the rule. \n
" } }, "Name": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "A descriptive name for the rule. \n
" + "smithy.api#documentation": "A descriptive name for the rule.\n
" } }, "PredicateList": { - "target": "com.amazonaws.securityhub#AwsWafRulePredicateList", + "target": "com.amazonaws.securityhub#AwsWafRegionalRulePredicateList", "traits": { - "smithy.api#documentation": "Specifies the ByteMatchSet
, IPSet
, SqlInjectionMatchSet
, XssMatchSet
, \n RegexMatchSet
, GeoMatchSet
, and SizeConstraintSet
objects that you want to add to a rule and, \n for each object, indicates whether you want to negate the settings.\n
Specifies the ByteMatchSet
, IPSet
,\n SqlInjectionMatchSet
, XssMatchSet
, RegexMatchSet
,\n GeoMatchSet
, and SizeConstraintSet
objects that you want to\n add to a rule and, for each object, indicates whether you want to negate the settings.
The ID of the WAF rule. \n
" + "smithy.api#documentation": "The ID of the rule.\n
" } } }, "traits": { - "smithy.api#documentation": "Provides information about a WAF rule. This rule specifies the web requests that you want to allow, block, or count.\n
" + "smithy.api#documentation": "Provides information about an WAF Regional rule. This rule identifies the web requests that you want to allow, block, or count.
" } }, - "com.amazonaws.securityhub#AwsWafRuleGroupDetails": { + "com.amazonaws.securityhub#AwsWafRegionalRuleGroupDetails": { "type": "structure", "members": { "MetricName": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "The name of the metrics for this rule group. \n
" + "smithy.api#documentation": "A name for the metrics for this rule group.\n
" } }, "Name": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "The name of the rule group. \n
" + "smithy.api#documentation": "The descriptive name of the rule group.\n
" } }, "RuleGroupId": { @@ -17269,76 +15593,76 @@ } }, "Rules": { - "target": "com.amazonaws.securityhub#AwsWafRuleGroupRulesList", + "target": "com.amazonaws.securityhub#AwsWafRegionalRuleGroupRulesList", "traits": { - "smithy.api#documentation": "Provides information about the rules attached to the rule group. These rules identify the web requests that you want to \nallow, block, or count. \n
" + "smithy.api#documentation": "Provides information about the rule statements used to identify the web requests that you want to allow, block, or \ncount. \n
" } } }, "traits": { - "smithy.api#documentation": "Provides information about an WAF rule group. A rule group is a collection of rules for inspecting and controlling web requests. \n
" + "smithy.api#documentation": "Provides information about an WAF Regional rule group. The rule group is a collection of rules for inspecting and controlling web\n requests.
" } }, - "com.amazonaws.securityhub#AwsWafRuleGroupRulesActionDetails": { + "com.amazonaws.securityhub#AwsWafRegionalRuleGroupRulesActionDetails": { "type": "structure", "members": { "Type": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "The action that WAF should take on a web request when it matches the rule's\n statement.
" + "smithy.api#documentation": "Specifies the ByteMatchSet
, IPSet
, SqlInjectionMatchSet
, XssMatchSet
, RegexMatchSet
, \nGeoMatchSet
, and SizeConstraintSet
objects that you want to add to a rule and, for each object, indicates whether you want to negate the settings.
Provides information about what action WAF should take on a web request when it matches the criteria defined in the rule.\n
" + "smithy.api#documentation": "Describes the action that WAF should take on a web request when it matches the criteria defined in the rule.\n
" } }, - "com.amazonaws.securityhub#AwsWafRuleGroupRulesDetails": { + "com.amazonaws.securityhub#AwsWafRegionalRuleGroupRulesDetails": { "type": "structure", "members": { "Action": { - "target": "com.amazonaws.securityhub#AwsWafRuleGroupRulesActionDetails", + "target": "com.amazonaws.securityhub#AwsWafRegionalRuleGroupRulesActionDetails", "traits": { - "smithy.api#documentation": "Provides information about what action WAF should take on a web request when it matches the criteria defined in the rule.\n
" + "smithy.api#documentation": "The action that WAF should take on a web request when it matches the criteria defined in the rule.
" } }, "Priority": { "target": "com.amazonaws.securityhub#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "If you define more than one rule in a web ACL, WAF evaluates each request against the rules in order\n based on the value of Priority
.
If you define more than one rule in a web ACL, WAF evaluates each request against the rules in\n order based on the value of Priority
.
The rule ID for a rule.\n
" + "smithy.api#documentation": "The ID for a rule.\n
" } }, "Type": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "The type of rule.\n
" + "smithy.api#documentation": "The type of rule in the rule group.\n
" } } }, "traits": { - "smithy.api#documentation": "Provides information about the rules attached to the rule group. These rules identify the web requests that you want to \n allow, block, or count. \n
" + "smithy.api#documentation": "Provides information about the rules attached to a rule group\n
" } }, - "com.amazonaws.securityhub#AwsWafRuleGroupRulesList": { + "com.amazonaws.securityhub#AwsWafRegionalRuleGroupRulesList": { "type": "list", "member": { - "target": "com.amazonaws.securityhub#AwsWafRuleGroupRulesDetails" + "target": "com.amazonaws.securityhub#AwsWafRegionalRuleGroupRulesDetails" } }, - "com.amazonaws.securityhub#AwsWafRulePredicateList": { + "com.amazonaws.securityhub#AwsWafRegionalRulePredicateList": { "type": "list", "member": { - "target": "com.amazonaws.securityhub#AwsWafRulePredicateListDetails" + "target": "com.amazonaws.securityhub#AwsWafRegionalRulePredicateListDetails" } }, - "com.amazonaws.securityhub#AwsWafRulePredicateListDetails": { + "com.amazonaws.securityhub#AwsWafRegionalRulePredicateListDetails": { "type": "structure", "members": { "DataId": { @@ -17362,494 +15686,351 @@ } }, "traits": { - "smithy.api#documentation": "Provides details about the ByteMatchSet
, IPSet
, SqlInjectionMatchSet
, XssMatchSet
, \n RegexMatchSet
, GeoMatchSet
, and SizeConstraintSet
objects that you want to add to a rule and, \nfor each object, indicates whether you want to negate the settings.\n
Provides details about the ByteMatchSet
, IPSet
, SqlInjectionMatchSet
, XssMatchSet
, \n RegexMatchSet
, GeoMatchSet
, and SizeConstraintSet
objects that you want to add to a rule and, for each object, indicates whether you want to negate the settings.\n
A friendly name or description of the web ACL. You can't change the name of a web ACL after you create it.
" + "smithy.api#documentation": "The action to perform if none of the rules contained in the web ACL match. \n
" } }, - "DefaultAction": { + "MetricName": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "The action to perform if none of the rules contained in the web ACL match.
" + "smithy.api#documentation": "A name for the metrics for this web ACL. \n
" } }, - "Rules": { - "target": "com.amazonaws.securityhub#AwsWafWebAclRuleList", + "Name": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "An array that contains the action for each rule in a web ACL, the priority of the rule, and the ID of the rule.
" + "smithy.api#documentation": "A descriptive name for the web ACL. \n
" + } + }, + "RulesList": { + "target": "com.amazonaws.securityhub#AwsWafRegionalWebAclRulesList", + "traits": { + "smithy.api#documentation": "An array that contains the action for each rule in a web ACL, the priority of the rule, and the ID of \nthe rule.\n
" } }, "WebAclId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "A unique identifier for a web ACL.
" + "smithy.api#documentation": "The ID of the web ACL.\n
" } } }, "traits": { - "smithy.api#documentation": "Provides information about an WAF web access control list (web ACL).
" + "smithy.api#documentation": "Provides information about the web access control list (web ACL). The web ACL contains the rules that identify the requests that you\n want to allow, block, or count.
" } }, - "com.amazonaws.securityhub#AwsWafWebAclRule": { + "com.amazonaws.securityhub#AwsWafRegionalWebAclRulesList": { + "type": "list", + "member": { + "target": "com.amazonaws.securityhub#AwsWafRegionalWebAclRulesListDetails" + } + }, + "com.amazonaws.securityhub#AwsWafRegionalWebAclRulesListActionDetails": { "type": "structure", "members": { - "Action": { - "target": "com.amazonaws.securityhub#WafAction", + "Type": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "Specifies the action that CloudFront or WAF takes when a web request matches the\n conditions in the rule.
" + "smithy.api#documentation": "For actions that are associated with a rule, the action that WAF takes when a web request matches all conditions in a rule.\n
" } - }, - "ExcludedRules": { - "target": "com.amazonaws.securityhub#WafExcludedRuleList", + } + }, + "traits": { + "smithy.api#documentation": "The action that WAF takes when a web request matches all conditions in the\n rule, such as allow, block, or count the request.
" + } + }, + "com.amazonaws.securityhub#AwsWafRegionalWebAclRulesListDetails": { + "type": "structure", + "members": { + "Action": { + "target": "com.amazonaws.securityhub#AwsWafRegionalWebAclRulesListActionDetails", "traits": { - "smithy.api#documentation": "Rules to exclude from a rule group.
" + "smithy.api#documentation": "The action that WAF takes when a web request matches all conditions in the rule, such as allow, \n block, or count the request.\n
" } }, "OverrideAction": { - "target": "com.amazonaws.securityhub#WafOverrideAction", + "target": "com.amazonaws.securityhub#AwsWafRegionalWebAclRulesListOverrideActionDetails", "traits": { - "smithy.api#documentation": "Use the OverrideAction
to test your RuleGroup
.
Any rule in a RuleGroup
can potentially block a request. If you set the OverrideAction
to\n None
, the RuleGroup
blocks a request if any individual rule in the RuleGroup
\n matches the request and is configured to block that request.
However, if you first want to test the RuleGroup
,\n set the OverrideAction
to Count
. The RuleGroup
\n then overrides any block action specified by individual rules contained within the group.\n Instead of blocking matching requests, those requests are counted.
\n ActivatedRule
|OverrideAction
applies only when updating or\n adding a RuleGroup
\n to a web ACL. In this case you do not use ActivatedRule
\n Action
. For all other update requests,\n ActivatedRule
\n Action
is used instead of ActivatedRule
\n OverrideAction
.
Overrides the rule evaluation result in the rule group. \n
" } }, "Priority": { "target": "com.amazonaws.securityhub#Integer", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "Specifies the order in which the rules in a web\n ACL are evaluated. Rules with a lower value for Priority
are\n evaluated before rules with a higher value. The value must be a unique integer. If you add\n multiple rules to a web ACL, the values do not need to be consecutive.
The order in which WAF evaluates the rules in a web ACL.\n
" } }, "RuleId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "The identifier for a rule.
" + "smithy.api#documentation": "The ID of an WAF Regional rule to associate with a web ACL.
" } }, "Type": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "The rule type.
\nValid values: REGULAR
| RATE_BASED
| GROUP
\n
The default is REGULAR
.
Details for a rule in an WAF web ACL.
" - } - }, - "com.amazonaws.securityhub#AwsWafWebAclRuleList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsWafWebAclRule" - } - }, - "com.amazonaws.securityhub#AwsWafv2ActionAllowDetails": { - "type": "structure", - "members": { - "CustomRequestHandling": { - "target": "com.amazonaws.securityhub#AwsWafv2CustomRequestHandlingDetails", - "traits": { - "smithy.api#documentation": "\n Defines custom handling for the web request. For information about customizing web requests and responses, see \n Customizing web requests and responses in WAF in the WAF Developer Guide..\n
" + "smithy.api#documentation": "For actions that are associated with a rule, the action that WAF takes when a web \nrequest matches all conditions in a rule.\n
" } } }, "traits": { - "smithy.api#documentation": "\n Specifies that WAF should allow the request and optionally defines additional custom handling for the request.\n
" + "smithy.api#documentation": "A combination of ByteMatchSet
, IPSet
, and/or SqlInjectionMatchSet
\nobjects that identify the web requests that you want to allow, block, or count. \n
\n Defines a custom response for the web request. For information, see \n Customizing web requests and responses in WAF in the WAF Developer Guide..\n
" + "smithy.api#documentation": "Overrides the rule evaluation result in the rule group.\n
" } } }, "traits": { - "smithy.api#documentation": "\n Specifies that WAF should block the request and optionally defines additional custom handling for the response to the web request.\n
" + "smithy.api#documentation": "Provides details about the action to use in the place of the action that results from the rule group \nevaluation. \n
" } }, - "com.amazonaws.securityhub#AwsWafv2CustomHttpHeader": { + "com.amazonaws.securityhub#AwsWafRuleDetails": { "type": "structure", "members": { - "Name": { + "MetricName": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The name of the custom header.\n
" + "smithy.api#documentation": "The name of the metrics for this rule.\n
" } }, - "Value": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n The value of the custom header.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n A custom header for custom request and response handling.\n
" - } - }, - "com.amazonaws.securityhub#AwsWafv2CustomRequestHandlingDetails": { - "type": "structure", - "members": { - "InsertHeaders": { - "target": "com.amazonaws.securityhub#AwsWafv2InsertHeadersList", - "traits": { - "smithy.api#documentation": "\n The HTTP headers to insert into the request.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Custom request handling behavior that inserts custom headers into a web request. WAF uses custom request handling when the rule action doesn't block the request.\n
" - } - }, - "com.amazonaws.securityhub#AwsWafv2CustomResponseDetails": { - "type": "structure", - "members": { - "CustomResponseBodyKey": { + "Name": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n References the response body that you want WAF to return to the web request client. You can define a custom response for a rule action or a default web ACL action that is set to block.\n
" + "smithy.api#documentation": "A descriptive name for the rule. \n
" } }, - "ResponseCode": { - "target": "com.amazonaws.securityhub#Integer", + "PredicateList": { + "target": "com.amazonaws.securityhub#AwsWafRulePredicateList", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The HTTP status code to return to the client. For a list of status codes that you can use in your custom responses, see \n Supported status codes for custom response in the WAF Developer Guide.\n
" + "smithy.api#documentation": "Specifies the ByteMatchSet
, IPSet
, SqlInjectionMatchSet
, XssMatchSet
, \n RegexMatchSet
, GeoMatchSet
, and SizeConstraintSet
objects that you want to add to a rule and, \n for each object, indicates whether you want to negate the settings.\n
\n The HTTP headers to use in the response.\n
" + "smithy.api#documentation": "The ID of the WAF rule. \n
" } } }, "traits": { - "smithy.api#documentation": "\n A custom response to send to the client. You can define a custom response for rule actions and default web ACL actions that are set to block.\n
" - } - }, - "com.amazonaws.securityhub#AwsWafv2InsertHeadersList": { - "type": "list", - "member": { - "target": "com.amazonaws.securityhub#AwsWafv2CustomHttpHeader" + "smithy.api#documentation": "Provides information about a WAF rule. This rule specifies the web requests that you want to allow, block, or count.\n
" } }, - "com.amazonaws.securityhub#AwsWafv2RuleGroupDetails": { + "com.amazonaws.securityhub#AwsWafRuleGroupDetails": { "type": "structure", "members": { - "Capacity": { - "target": "com.amazonaws.securityhub#Long", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The web ACL capacity units (WCUs) required for this rule group. \n
" - } - }, - "Description": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n A description of the rule group that helps with identification.\n
" - } - }, - "Id": { + "MetricName": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n A unique identifier for the rule group. \n
" + "smithy.api#documentation": "The name of the metrics for this rule group. \n
" } }, "Name": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The name of the rule group. You cannot change the name of a rule group after you create it.\n
" + "smithy.api#documentation": "The name of the rule group. \n
" } }, - "Arn": { + "RuleGroupId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The Amazon Resource Name (ARN) of the entity.\n
" + "smithy.api#documentation": "The ID of the rule group.\n
" } }, "Rules": { - "target": "com.amazonaws.securityhub#AwsWafv2RulesList", - "traits": { - "smithy.api#documentation": "\n The Rule statements used to identify the web requests that you want to allow, block, or count. Each rule includes one \n top-level statement that WAF uses to identify matching web requests, and parameters that govern how \n WAF handles them.\n
" - } - }, - "Scope": { - "target": "com.amazonaws.securityhub#NonEmptyString", - "traits": { - "smithy.api#documentation": "\n Specifies whether the rule group is for an Amazon CloudFront distribution or for a regional application. \n A regional application can be an Application Load Balancer (ALB), an Amazon API Gateway REST API, an AppSync \n GraphQL API, or an Amazon Cognito user pool.\n
" - } - }, - "VisibilityConfig": { - "target": "com.amazonaws.securityhub#AwsWafv2VisibilityConfigDetails", - "traits": { - "smithy.api#documentation": "\n Defines and enables Amazon CloudWatch metrics and web request sample collection.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Details about an WAFv2 rule group.\n
" - } - }, - "com.amazonaws.securityhub#AwsWafv2RulesActionCaptchaDetails": { - "type": "structure", - "members": { - "CustomRequestHandling": { - "target": "com.amazonaws.securityhub#AwsWafv2CustomRequestHandlingDetails", - "traits": { - "smithy.api#documentation": "\n Defines custom handling for the web request, used when the CAPTCHA inspection determines that the request's token is valid and unexpired. For more information, \n see Customizing web requests and responses in WAF in the WAF Developer Guide..\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Specifies that WAF should run a CAPTCHA check against the request.\n
" - } - }, - "com.amazonaws.securityhub#AwsWafv2RulesActionCountDetails": { - "type": "structure", - "members": { - "CustomRequestHandling": { - "target": "com.amazonaws.securityhub#AwsWafv2CustomRequestHandlingDetails", + "target": "com.amazonaws.securityhub#AwsWafRuleGroupRulesList", "traits": { - "smithy.api#documentation": "\n Defines custom handling for the web request. For more information, \n see Customizing web requests and responses in WAF in the WAF Developer Guide..\n
" + "smithy.api#documentation": "Provides information about the rules attached to the rule group. These rules identify the web requests that you want to \nallow, block, or count. \n
" } } }, "traits": { - "smithy.api#documentation": "\n Specifies that WAF should count the request.\n
" + "smithy.api#documentation": "Provides information about an WAF rule group. A rule group is a collection of rules for inspecting and controlling web requests. \n
" } }, - "com.amazonaws.securityhub#AwsWafv2RulesActionDetails": { + "com.amazonaws.securityhub#AwsWafRuleGroupRulesActionDetails": { "type": "structure", "members": { - "Allow": { - "target": "com.amazonaws.securityhub#AwsWafv2ActionAllowDetails", - "traits": { - "smithy.api#documentation": "\n Instructs WAF to allow the web request.\n
" - } - }, - "Block": { - "target": "com.amazonaws.securityhub#AwsWafv2ActionBlockDetails", - "traits": { - "smithy.api#documentation": "\n Instructs WAF to block the web request.\n
" - } - }, - "Captcha": { - "target": "com.amazonaws.securityhub#AwsWafv2RulesActionCaptchaDetails", - "traits": { - "smithy.api#documentation": "\n Instructs WAF to run a CAPTCHA check against the web request.\n
" - } - }, - "Count": { - "target": "com.amazonaws.securityhub#AwsWafv2RulesActionCountDetails", + "Type": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n Instructs WAF to count the web request and then continue evaluating the request using the remaining rules in the web ACL.\n
" + "smithy.api#documentation": "The action that WAF should take on a web request when it matches the rule's\n statement.
" } } }, "traits": { - "smithy.api#documentation": "\n The action that WAF should take on a web request when it matches a rule's statement. \n Settings at the web ACL level can override the rule action setting.\n
" + "smithy.api#documentation": "Provides information about what action WAF should take on a web request when it matches the criteria defined in the rule.\n
" } }, - "com.amazonaws.securityhub#AwsWafv2RulesDetails": { + "com.amazonaws.securityhub#AwsWafRuleGroupRulesDetails": { "type": "structure", "members": { "Action": { - "target": "com.amazonaws.securityhub#AwsWafv2RulesActionDetails", + "target": "com.amazonaws.securityhub#AwsWafRuleGroupRulesActionDetails", "traits": { - "smithy.api#documentation": "\n The action that WAF should take on a web request when it matches the rule statement. Settings at the web ACL level can override the rule action setting.\n
" + "smithy.api#documentation": "Provides information about what action WAF should take on a web request when it matches the criteria defined in the rule.\n
" } }, - "Name": { - "target": "com.amazonaws.securityhub#NonEmptyString", + "Priority": { + "target": "com.amazonaws.securityhub#Integer", "traits": { - "smithy.api#documentation": "\n The name of the rule. \n
" + "smithy.api#default": 0, + "smithy.api#documentation": "If you define more than one rule in a web ACL, WAF evaluates each request against the rules in order\n based on the value of Priority
.
\n The action to use in the place of the action that results from the rule group evaluation. \n
" - } - }, - "Priority": { - "target": "com.amazonaws.securityhub#Integer", - "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n If you define more than one Rule in a WebACL, WAF evaluates each request against the Rules in order based on the value of Priority
. \n WAF processes rules with lower priority first. The priorities don't need to be consecutive, but they must all be different.\n
The rule ID for a rule.\n
" } }, - "VisibilityConfig": { - "target": "com.amazonaws.securityhub#AwsWafv2VisibilityConfigDetails", + "Type": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n Defines and enables Amazon CloudWatch metrics and web request sample collection.\n
" + "smithy.api#documentation": "The type of rule.\n
" } } }, "traits": { - "smithy.api#documentation": "\n Provides details about rules in a rule group. A rule identifies web requests that you want to allow, block, or count. Each rule includes one top-level Statement that AWS WAF uses to identify matching web requests, and parameters that govern how AWS WAF handles them.\n
" + "smithy.api#documentation": "Provides information about the rules attached to the rule group. These rules identify the web requests that you want to \n allow, block, or count. \n
" + } + }, + "com.amazonaws.securityhub#AwsWafRuleGroupRulesList": { + "type": "list", + "member": { + "target": "com.amazonaws.securityhub#AwsWafRuleGroupRulesDetails" } }, - "com.amazonaws.securityhub#AwsWafv2RulesList": { + "com.amazonaws.securityhub#AwsWafRulePredicateList": { "type": "list", "member": { - "target": "com.amazonaws.securityhub#AwsWafv2RulesDetails" + "target": "com.amazonaws.securityhub#AwsWafRulePredicateListDetails" } }, - "com.amazonaws.securityhub#AwsWafv2VisibilityConfigDetails": { + "com.amazonaws.securityhub#AwsWafRulePredicateListDetails": { "type": "structure", "members": { - "CloudWatchMetricsEnabled": { - "target": "com.amazonaws.securityhub#Boolean", - "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "\n A boolean indicating whether the associated resource sends metrics to Amazon CloudWatch. For the list of available \n metrics, see WAF metrics and dimensions in the WAF Developer Guide.\n
" - } - }, - "MetricName": { + "DataId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n A name of the Amazon CloudWatch metric. \n
" + "smithy.api#documentation": "A unique identifier for a predicate in a rule, such as ByteMatchSetId
or IPSetId
.\n
\n A boolean indicating whether WAF should store a sampling of the web requests that match the rules. \n You can view the sampled requests through the WAF console.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Defines and enables Amazon CloudWatch metrics and web request sample collection.\n
" - } - }, - "com.amazonaws.securityhub#AwsWafv2WebAclActionDetails": { - "type": "structure", - "members": { - "Allow": { - "target": "com.amazonaws.securityhub#AwsWafv2ActionAllowDetails", - "traits": { - "smithy.api#documentation": "\n Specifies that WAF should allow requests by default.\n
" + "smithy.api#documentation": "Specifies if you want WAF to allow, block, or count requests based on the settings in the \n ByteMatchSet
, IPSet
, SqlInjectionMatchSet
, XssMatchSet
, \n RegexMatchSet
, GeoMatchSet
, or SizeConstraintSet
.\n
\n Specifies that WAF should block requests by default.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Specifies the action that Amazon CloudFront or WAF takes when a web request matches the conditions in the rule.\n
" - } - }, - "com.amazonaws.securityhub#AwsWafv2WebAclCaptchaConfigDetails": { - "type": "structure", - "members": { - "ImmunityTimeProperty": { - "target": "com.amazonaws.securityhub#AwsWafv2WebAclCaptchaConfigImmunityTimePropertyDetails", - "traits": { - "smithy.api#documentation": "\n Determines how long a CAPTCHA timestamp in the token remains valid after the client successfully solves a CAPTCHA puzzle.\n
" - } - } - }, - "traits": { - "smithy.api#documentation": "\n Specifies how WAF should handle CAPTCHA evaluations for rules that don't have their own CaptchaConfig
settings.\n
\n The amount of time, in seconds, that a CAPTCHA or challenge timestamp is considered valid by WAF.\n
" + "smithy.api#documentation": "The type of predicate in a rule, such as ByteMatch
or IPSet
.\n
\n Used for CAPTCHA and challenge token settings. Determines how long a CAPTCHA or challenge timestamp remains valid after WAF updates it for a successful CAPTCHA or challenge response.\n
" + "smithy.api#documentation": "Provides details about the ByteMatchSet
, IPSet
, SqlInjectionMatchSet
, XssMatchSet
, \n RegexMatchSet
, GeoMatchSet
, and SizeConstraintSet
objects that you want to add to a rule and, \nfor each object, indicates whether you want to negate the settings.\n
\n The name of the web ACL.\n
" + "smithy.api#documentation": "A friendly name or description of the web ACL. You can't change the name of a web ACL after you create it.
" } }, - "Arn": { + "DefaultAction": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n The Amazon Resource Name (ARN) of the web ACL that you want to associate with the resource.\n
" + "smithy.api#documentation": "The action to perform if none of the rules contained in the web ACL match.
" } }, - "ManagedbyFirewallManager": { - "target": "com.amazonaws.securityhub#Boolean", + "Rules": { + "target": "com.amazonaws.securityhub#AwsWafWebAclRuleList", "traits": { - "smithy.api#default": false, - "smithy.api#documentation": "\n Indicates whether this web ACL is managed by Firewall Manager. \n
" + "smithy.api#documentation": "An array that contains the action for each rule in a web ACL, the priority of the rule, and the ID of the rule.
" } }, - "Id": { + "WebAclId": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n A unique identifier for the web ACL.\n
" + "smithy.api#documentation": "A unique identifier for a web ACL.
" } - }, - "Capacity": { - "target": "com.amazonaws.securityhub#Long", + } + }, + "traits": { + "smithy.api#documentation": "Provides information about an WAF web access control list (web ACL).
" + } + }, + "com.amazonaws.securityhub#AwsWafWebAclRule": { + "type": "structure", + "members": { + "Action": { + "target": "com.amazonaws.securityhub#WafAction", "traits": { - "smithy.api#default": 0, - "smithy.api#documentation": "\n The web ACL capacity units (WCUs) currently being used by this web ACL.\n
" + "smithy.api#documentation": "Specifies the action that CloudFront or WAF takes when a web request matches the\n conditions in the rule.
" } }, - "CaptchaConfig": { - "target": "com.amazonaws.securityhub#AwsWafv2WebAclCaptchaConfigDetails", + "ExcludedRules": { + "target": "com.amazonaws.securityhub#WafExcludedRuleList", "traits": { - "smithy.api#documentation": "\n Specifies how WAF should handle CAPTCHA evaluations for rules that don't have their own \n CaptchaConfig
settings.\n
Rules to exclude from a rule group.
" } }, - "DefaultAction": { - "target": "com.amazonaws.securityhub#AwsWafv2WebAclActionDetails", + "OverrideAction": { + "target": "com.amazonaws.securityhub#WafOverrideAction", "traits": { - "smithy.api#documentation": "\n The action to perform if none of the Rules contained in the web ACL match.\n
" + "smithy.api#documentation": "Use the OverrideAction
to test your RuleGroup
.
Any rule in a RuleGroup
can potentially block a request. If you set the OverrideAction
to\n None
, the RuleGroup
blocks a request if any individual rule in the RuleGroup
\n matches the request and is configured to block that request.
However, if you first want to test the RuleGroup
,\n set the OverrideAction
to Count
. The RuleGroup
\n then overrides any block action specified by individual rules contained within the group.\n Instead of blocking matching requests, those requests are counted.
\n ActivatedRule
|OverrideAction
applies only when updating or\n adding a RuleGroup
\n to a web ACL. In this case you do not use ActivatedRule
\n Action
. For all other update requests,\n ActivatedRule
\n Action
is used instead of ActivatedRule
\n OverrideAction
.
\n A description of the web ACL that helps with identification.\n
" + "smithy.api#default": 0, + "smithy.api#documentation": "Specifies the order in which the rules in a web\n ACL are evaluated. Rules with a lower value for Priority
are\n evaluated before rules with a higher value. The value must be a unique integer. If you add\n multiple rules to a web ACL, the values do not need to be consecutive.
\n The Rule statements used to identify the web requests that you want to allow, block, or count. Each rule includes one \n top-level statement that WAF uses to identify matching web requests, and parameters that govern how \n WAF handles them.\n
" + "smithy.api#documentation": "The identifier for a rule.
" } }, - "VisibilityConfig": { - "target": "com.amazonaws.securityhub#AwsWafv2VisibilityConfigDetails", + "Type": { + "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "\n Defines and enables Amazon CloudWatch metrics and web request sample collection.\n
" + "smithy.api#documentation": "The rule type.
\nValid values: REGULAR
| RATE_BASED
| GROUP
\n
The default is REGULAR
.
\n Details about an WAFv2 web Access Control List (ACL).\n
" + "smithy.api#documentation": "Details for a rule in an WAF web ACL.
" + } + }, + "com.amazonaws.securityhub#AwsWafWebAclRuleList": { + "type": "list", + "member": { + "target": "com.amazonaws.securityhub#AwsWafWebAclRule" } }, "com.amazonaws.securityhub#AwsXrayEncryptionConfigDetails": { @@ -18203,7 +16384,7 @@ "ErrorMessage": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "The message associated with the error. Possible values are:
\n\n Concurrent finding updates detected
\n
\n Finding Identifier is duplicated
\n
\n Finding Not Found
\n
\n Finding size exceeded 240 KB
\n
\n Internal service failure
\n
\n Invalid Input
\n
The message associated with the error. Possible values are:
\n\n Concurrent finding updates detected
\n
\n Finding Identifier is duplicated
\n
\n Finding Not Found
\n
\n Finding size exceeded 240 KB
\n
\n Internal service failure
\n
\n Invalid Input
\n
Whether to automatically enable Security Hub default standards \n for new member accounts in the organization.
\nThe default value of this parameter is equal to DEFAULT
.
If equal to DEFAULT
, then Security Hub default standards are automatically enabled for new member \n accounts. If equal to NONE
, then default standards are not automatically enabled for new member \n accounts.
Whether to automatically enable Security Hub default standards \n for new member accounts in the organization.
\nThe default value of this parameter is equal to DEFAULT
.
If equal to DEFAULT
, then Security Hub default standards are automatically enabled for new member \n accounts. If equal to NONE
, then default standards are not automatically enabled for new member \n accounts.
The account doesn't have permission to perform this action.
", + "smithy.api#documentation": "There is an issue with the account used to make the request. Either Security Hub is not enabled\n for the account, or the account does not have permission to perform this action.
", "smithy.api#error": "client", "smithy.api#httpError": 401 } @@ -21538,7 +19719,7 @@ "CidrBlockState": { "target": "com.amazonaws.securityhub#NonEmptyString", "traits": { - "smithy.api#documentation": "Information about the state of the CIDR block. Valid values are as follows:
\n\n associating
\n
\n associated
\n
\n disassociating
\n
\n disassociated
\n
\n failed
\n
\n failing
\n
Information about the state of the CIDR block. Valid values are as follows:
\n\t\t\n associating
\n
\n associated
\n
\n disassociating
\n
\n disassociated
\n
\n failed
\n
\n failing
\n
Provides details about an Backup backup, or recovery point.\n
" } - }, - "AwsEc2LaunchTemplate": { - "target": "com.amazonaws.securityhub#AwsEc2LaunchTemplateDetails" - }, - "AwsSageMakerNotebookInstance": { - "target": "com.amazonaws.securityhub#AwsSageMakerNotebookInstanceDetails" - }, - "AwsWafv2WebAcl": { - "target": "com.amazonaws.securityhub#AwsWafv2WebAclDetails" - }, - "AwsWafv2RuleGroup": { - "target": "com.amazonaws.securityhub#AwsWafv2RuleGroupDetails" } }, "traits": { @@ -24516,7 +22685,7 @@ "name": "securityhub" }, "aws.protocols#restJson1": {}, - "smithy.api#documentation": "Security Hub provides you with a comprehensive view of the security state of your Amazon Web Services environment and resources. It also provides you with the readiness status\n of your environment based on controls from supported security standards. Security Hub collects\n security data from Amazon Web Services accounts, services, and integrated third-party products and helps\n you analyze security trends in your environment to identify the highest priority security\n issues. For more information about Security Hub, see the \n Security HubUser\n Guide\n .
\nWhen you use operations in the Security Hub API, the requests are executed only in the Amazon Web Services\n Region that is currently active or in the specific Amazon Web Services Region that you specify in your\n request. Any configuration or settings change that results from the operation is applied\n only to that Region. To make the same change in other Regions, execute the same command for\n each Region to apply the change to.
\nFor example, if your Region is set to us-west-2
, when you use CreateMembers
to add a member account to Security Hub, the association of\n the member account with the administrator account is created only in the us-west-2
\n Region. Security Hub must be enabled for the member account in the same Region that the invitation\n was sent from.
The following throttling limits apply to using Security Hub API operations.
\n\n BatchEnableStandards
- RateLimit
of 1\n request per second, BurstLimit
of 1 request per second.
\n GetFindings
- RateLimit
of 3 requests per second.\n BurstLimit
of 6 requests per second.
\n BatchImportFindings
- RateLimit
of 10 requests per second.\n BurstLimit
of 30 requests per second.
\n BatchUpdateFindings
- RateLimit
of 10 requests per second.\n BurstLimit
of 30 requests per second.
\n UpdateStandardsControl
- RateLimit
of\n 1 request per second, BurstLimit
of 5 requests per second.
All other operations - RateLimit
of 10 requests per second.\n BurstLimit
of 30 requests per second.
Security Hub provides you with a comprehensive view of the security state of your Amazon Web Services environment and resources. It also provides you with the readiness status\n of your environment based on controls from supported security standards. Security Hub collects\n security data from Amazon Web Services accounts, services, and integrated third-party products and helps\n you analyze security trends in your environment to identify the highest priority security\n issues. For more information about Security Hub, see the \n Security HubUser\n Guide\n .
\nWhen you use operations in the Security Hub API, the requests are executed only in the Amazon Web Services\n Region that is currently active or in the specific Amazon Web Services Region that you specify in your\n request. Any configuration or settings change that results from the operation is applied\n only to that Region. To make the same change in other Regions, execute the same command for\n each Region to apply the change to.
\nFor example, if your Region is set to us-west-2
, when you use CreateMembers
to add a member account to Security Hub, the association of\n the member account with the administrator account is created only in the us-west-2
\n Region. Security Hub must be enabled for the member account in the same Region that the invitation\n was sent from.
The following throttling limits apply to using Security Hub API operations.
\n\n BatchEnableStandards
- RateLimit
of 1\n request per second, BurstLimit
of 1 request per second.
\n GetFindings
- RateLimit
of 3 requests per second.\n BurstLimit
of 6 requests per second.
\n BatchImportFindings
- RateLimit
of 10 requests per second.\n BurstLimit
of 30 requests per second.
\n BatchUpdateFindings
- RateLimit
of 10 requests per second.\n BurstLimit
of 30 requests per second.
\n UpdateStandardsControl
- RateLimit
of\n 1 request per second, BurstLimit
of 5 requests per second.
All other operations - RateLimit
of 10 requests per second.\n BurstLimit
of 30 requests per second.
If you specify true
for this value, your automation or command continue to run\n even if we can't gather information about the state of your CloudWatch alarm. The default\n value is false
.
If you specify true
for this value, your automation or command continue to run even if we can't gather information about the state of your CloudWatch alarm. The default value is false
.
The details for the CloudWatch alarm you want to apply to an automation or\n command.
" + "smithy.api#documentation": "The details for the CloudWatch alarm you want to apply to an automation or command.
" } }, "com.amazonaws.ssm#AlarmList": { @@ -821,7 +821,7 @@ "name": "ssm" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "Amazon Web Services Systems Manager is the operations hub for your Amazon Web Services applications and resources and a secure\n end-to-end management solution for hybrid cloud environments that enables safe and secure\n operations at scale.
\nThis reference is intended to be used with the Amazon Web Services Systems Manager User Guide. To get started, see Setting up Amazon Web Services Systems Manager.
\n\n Related resources\n
\nFor information about each of the capabilities that comprise Systems Manager, see Systems Manager capabilities in the Amazon Web Services Systems Manager User Guide.
\nFor details about predefined runbooks for Automation, a capability of Amazon Web Services Systems Manager, see the\n \n Systems Manager Automation runbook reference\n .
\nFor information about AppConfig, a capability of Systems Manager, see the \n AppConfig User Guide\n \n and the \n AppConfig\n API Reference\n .
\nFor information about Incident Manager, a capability of Systems Manager, see the \n Systems Manager Incident Manager User\n Guide\n and the \n Systems Manager Incident Manager API\n Reference\n .
\nAmazon Web Services Systems Manager is a collection of capabilities to help you manage your applications and\n infrastructure running in the Amazon Web Services Cloud;. Systems Manager simplifies application and resource management,\n shortens the time to detect and resolve operational problems, and helps you manage your Amazon Web Services\n resources securely at scale.
\nThis reference is intended to be used with the Amazon Web Services Systems Manager User Guide.
\nTo get started, verify prerequisites. For more information, see Setting up\n Amazon Web Services Systems Manager.
\n\n Related resources\n
\nFor information about how to use a Query API, see Making API requests.
\nFor information about other API operations you can perform on EC2 instances, see the\n Amazon EC2 API Reference.
\nFor information about AppConfig, a capability of Systems Manager, see the AppConfig User Guide and the AppConfig API\n Reference.
\nFor information about Incident Manager, a capability of Systems Manager, see the Incident Manager User Guide\n and the Incident Manager API\n Reference.
\nThe name of the Identity and Access Management (IAM) role that you want to assign to\n the managed node. This IAM role must provide AssumeRole permissions for the\n Amazon Web Services Systems Manager service principal ssm.amazonaws.com
. For more information, see Create an\n IAM service role for a hybrid environment in the\n Amazon Web Services Systems Manager User Guide.
You can't specify an IAM service-linked role for this parameter. You must\n create a unique role.
\nThe name of the Identity and Access Management (IAM) role that you want to assign to\n the managed node. This IAM role must provide AssumeRole permissions for the\n Amazon Web Services Systems Manager service principal ssm.amazonaws.com
. For more information, see Create an\n IAM service role for a hybrid environment in the\n Amazon Web Services Systems Manager User Guide.
You can't specify an IAM service-linked role for this parameter. You must create a unique role.
\nAdds or overwrites one or more tags for a State Manager association.\n Tags are metadata that you can assign to your Amazon Web Services resources. Tags enable\n you to categorize your resources in different ways, for example, by purpose, owner, or\n environment. Each tag consists of a key and an optional value, both of which you define.
" + "smithy.api#documentation": "Adds or overwrites one or more tags for a State Manager association. Tags\n are metadata that you can assign to your Amazon Web Services resources. Tags enable you to categorize your\n resources in different ways, for example, by purpose, owner, or environment. Each tag consists of\n a key and an optional value, both of which you define.
" } }, "AlarmConfiguration": { @@ -11774,7 +11783,7 @@ "code": "DocumentPermissionLimit", "httpResponseCode": 400 }, - "smithy.api#documentation": "The document can't be shared with more Amazon Web Services user accounts. You can specify a maximum of\n 20 accounts per API operation to share a private document.
\n\nBy default, you can share a private document with a maximum of 1,000 accounts and publicly\n share up to five documents.
\n\nIf you need to increase the quota for privately or publicly shared Systems Manager documents, contact\n Amazon Web Services Support.
", + "smithy.api#documentation": "The document can't be shared with more Amazon Web Services user accounts. You can specify a maximum of 20 accounts per API operation to share a private document.
\n\nBy default, you can share a private document with a maximum of 1,000 accounts and publicly share up to five documents.
\n \nIf you need to increase the quota for privately or publicly shared Systems Manager documents, contact Amazon Web Services Support.
", "smithy.api#error": "client" } }, @@ -13683,7 +13692,7 @@ "AlarmConfiguration": { "target": "com.amazonaws.ssm#AlarmConfiguration", "traits": { - "smithy.api#documentation": "The details for the CloudWatch alarm you applied to your maintenance window\n task.
" + "smithy.api#documentation": "The details for the CloudWatch alarm you applied to your maintenance window task.
" } }, "TriggeredAlarms": { @@ -13942,7 +13951,7 @@ "AlarmConfiguration": { "target": "com.amazonaws.ssm#AlarmConfiguration", "traits": { - "smithy.api#documentation": "The details for the CloudWatch alarm you applied to your maintenance window\n task.
" + "smithy.api#documentation": "The details for the CloudWatch alarm you applied to your maintenance window task.
" } } } @@ -15575,21 +15584,21 @@ "target": "com.amazonaws.ssm#PatchCriticalNonCompliantCount", "traits": { "smithy.api#default": null, - "smithy.api#documentation": "The number of patches per node that are specified as Critical
for compliance\n reporting in the patch baseline aren't installed. These patches might be missing, have failed\n installation, were rejected, or were installed but awaiting a required managed node reboot. The\n status of these managed nodes is NON_COMPLIANT
.
The number of patches per node that are specified as Critical
for\n compliance reporting in the patch baseline aren't installed. These patches might be missing, have\n failed installation, were rejected, or were installed but awaiting a required managed node\n reboot. The status of these managed nodes is NON_COMPLIANT
.
The number of patches per node that are specified as Security
in a patch\n advisory aren't installed. These patches might be missing, have failed installation, were\n rejected, or were installed but awaiting a required managed node reboot. The status of these\n managed nodes is NON_COMPLIANT
.
The number of patches per node that are specified as Security
in a\n patch advisory aren't installed. These patches might be missing, have failed installation, were\n rejected, or were installed but awaiting a required managed node reboot. The status of these\n managed nodes is NON_COMPLIANT
.
The number of patches per node that are specified as other than Critical
or\n Security
but aren't compliant with the patch baseline. The status of these managed\n nodes is NON_COMPLIANT
.
The number of patches per node that are specified as other than\n Critical
or Security
but aren't compliant with the patch baseline. The\n status of these managed nodes is NON_COMPLIANT
.
Amazon Resource Name (ARN) of the resource to which you want to attach a policy.
", + "smithy.api#documentation": "Amazon Resource Name (ARN) of the resource to which the policies are attached.
", "smithy.api#required": {} } }, @@ -24484,7 +24493,7 @@ "PolicyHash": { "target": "com.amazonaws.ssm#PolicyHash", "traits": { - "smithy.api#documentation": "ID of the current policy version. The hash helps to prevent a situation where multiple users\n attempt to overwrite a policy. You must provide this hash when updating or deleting a\n policy.
" + "smithy.api#documentation": "ID of the current policy version. The hash helps to prevent a situation where multiple users\n attempt to overwrite a policy.
" } } } @@ -24495,13 +24504,13 @@ "PolicyId": { "target": "com.amazonaws.ssm#PolicyId", "traits": { - "smithy.api#documentation": "The policy ID. To update a policy, you must specify PolicyId
and\n PolicyHash
.
The policy ID. To update a policy, you must specify PolicyId
and PolicyHash
.
ID of the current policy version.
" + "smithy.api#documentation": "ID of the current policy version. The hash helps to prevent a situation where multiple users\n attempt to overwrite a policy. You must provide this hash when updating or deleting a\n policy.
" } } } @@ -25782,7 +25791,7 @@ "code": "ResourcePolicyLimitExceededException", "httpResponseCode": 400 }, - "smithy.api#documentation": "The PutResourcePolicy API action enforces two limits. A policy can't be\n greater than 1024 bytes in size. And only one policy can be attached to\n OpsItemGroup
. Verify these limits and try again.
The PutResourcePolicy API action enforces two limits. A policy can't be\n greater than 1024 bytes in size. And only one policy can be attached to OpsItemGroup
. Verify these limits and try again.
The details for the CloudWatch alarm you applied to your maintenance window\n task.
" + "smithy.api#documentation": "The details for the CloudWatch alarm you applied to your maintenance window task.
" } } } @@ -29427,7 +29436,7 @@ "IamRole": { "target": "com.amazonaws.ssm#IamRole", "traits": { - "smithy.api#documentation": "The name of the Identity and Access Management (IAM) role that you want to assign to\n the managed node. This IAM role must provide AssumeRole permissions for the\n Amazon Web Services Systems Manager service principal ssm.amazonaws.com
. For more information, see Create an\n IAM service role for a hybrid environment in the\n Amazon Web Services Systems Manager User Guide.
You can't specify an IAM service-linked role for this parameter. You must\n create a unique role.
\nThe name of the Identity and Access Management (IAM) role that you want to assign to\n the managed node. This IAM role must provide AssumeRole permissions for the\n Amazon Web Services Systems Manager service principal ssm.amazonaws.com
. For more information, see Create an\n IAM service role for a hybrid environment in the\n Amazon Web Services Systems Manager User Guide.
You can't specify an IAM service-linked role for this parameter. You must create a unique role.
\nThe Amazon Web Services Support API Reference is intended for programmers who need detailed\n information about the Amazon Web Services Support operations and data types. You can use the API to manage\n your support cases programmatically. The Amazon Web Services Support API uses HTTP methods that return\n results in JSON format.
\nYou must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that doesn't have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
You can also use the Amazon Web Services Support API to access features for Trusted Advisor. You can return a list of\n checks and their descriptions, get check results, specify checks to refresh, and get the\n refresh status of checks.
\nYou can manage your support cases with the following Amazon Web Services Support API operations:
\nThe CreateCase, DescribeCases, DescribeAttachment, and ResolveCase operations\n create Amazon Web Services Support cases, retrieve information about cases, and resolve cases.
\nThe DescribeCommunications, AddCommunicationToCase, and AddAttachmentsToSet operations retrieve and add communications and attachments to Amazon Web Services Support\n cases.
\nThe DescribeServices and DescribeSeverityLevels operations return Amazon Web Service names, service codes, service categories, and problem\n severity levels. You use these values when you call the CreateCase operation.
\nYou can also use the Amazon Web Services Support API to call the Trusted Advisor operations. For more\n information, see Trusted Advisor in the\n Amazon Web Services Support User Guide.
\nFor authentication of requests, Amazon Web Services Support uses Signature Version 4 Signing\n Process.
\nFor more information about this service and the endpoints to use, see About the\n Amazon Web Services Support API in the Amazon Web Services Support User Guide.
", + "smithy.api#documentation": "The Amazon Web Services Support API Reference is intended for programmers who need detailed\n information about the Amazon Web Services Support operations and data types. You can use the API to manage\n your support cases programmatically. The Amazon Web Services Support API uses HTTP methods that return\n results in JSON format.
\nYou must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that does not have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
The Amazon Web Services Support service also exposes a set of Trusted Advisor features. You can\n retrieve a list of checks and their descriptions, get check results, specify checks to\n refresh, and get the refresh status of checks.
\nThe following list describes the Amazon Web Services Support case management operations:
\nService names, issue categories, and available severity levels - The\n DescribeServices and DescribeSeverityLevels operations return Amazon Web Services service names,\n service codes, service categories, and problem severity levels. You use these\n values when you call the CreateCase operation.
\nCase creation, case details, and case resolution - The CreateCase, DescribeCases, DescribeAttachment, and ResolveCase operations\n create Amazon Web Services Support cases, retrieve information about cases, and resolve cases.
\nCase communication - The DescribeCommunications,\n AddCommunicationToCase, and AddAttachmentsToSet operations retrieve and add communications\n and attachments to Amazon Web Services Support cases.
\nThe following list describes the operations available from the Amazon Web Services Support service for\n Trusted Advisor:
\n\n DescribeTrustedAdvisorChecks returns the list of checks that\n run against your Amazon Web Services resources.
\nUsing the checkId
for a specific check returned by DescribeTrustedAdvisorChecks, you can call DescribeTrustedAdvisorCheckResult to obtain the results for the\n check that you specified.
\n DescribeTrustedAdvisorCheckSummaries returns summarized\n results for one or more Trusted Advisor checks.
\n\n RefreshTrustedAdvisorCheck requests that Trusted Advisor rerun a\n specified check.
\n\n DescribeTrustedAdvisorCheckRefreshStatuses reports the refresh\n status of one or more checks.
\nFor authentication of requests, Amazon Web Services Support uses Signature Version 4 Signing\n Process.
\nSee About the\n Amazon Web Services Support API in the Amazon Web Services Support User Guide for\n information about how to use this service to create and manage your support cases, and\n how to call Trusted Advisor for results of checks on your resources.
", "smithy.api#title": "AWS Support", "smithy.api#xmlNamespace": { "uri": "http://support.amazonaws.com/doc/2013-04-15/" @@ -98,7 +98,7 @@ "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -147,6 +147,15 @@ "ref": "Endpoint" } ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" } ], "type": "tree", @@ -427,8 +436,8 @@ "authSchemes": [ { "name": "sigv4", - "signingRegion": "us-east-1", - "signingName": "support" + "signingName": "support", + "signingRegion": "us-east-1" } ] }, @@ -454,8 +463,8 @@ "authSchemes": [ { "name": "sigv4", - "signingRegion": "cn-north-1", - "signingName": "support" + "signingName": "support", + "signingRegion": "cn-north-1" } ] }, @@ -481,8 +490,8 @@ "authSchemes": [ { "name": "sigv4", - "signingRegion": "us-gov-west-1", - "signingName": "support" + "signingName": "support", + "signingRegion": "us-gov-west-1" } ] }, @@ -508,8 +517,8 @@ "authSchemes": [ { "name": "sigv4", - "signingRegion": "us-iso-east-1", - "signingName": "support" + "signingName": "support", + "signingRegion": "us-iso-east-1" } ] }, @@ -535,8 +544,8 @@ "authSchemes": [ { "name": "sigv4", - "signingRegion": "us-isob-east-1", - "signingName": "support" + "signingName": "support", + "signingRegion": "us-isob-east-1" } ] }, @@ -568,9 +577,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "cn-north-1", + "name": "sigv4", "signingName": "support", - "name": "sigv4" + "signingRegion": "cn-north-1" } ] }, @@ -578,9 +587,9 @@ } }, "params": { - "UseFIPS": false, "Region": "aws-cn-global", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -590,9 +599,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-east-1", + "name": "sigv4", "signingName": "support", - "name": "sigv4" + "signingRegion": "us-east-1" } ] }, @@ -600,9 +609,9 @@ } }, "params": { - "UseFIPS": false, "Region": "aws-global", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -612,9 +621,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-iso-east-1", + "name": "sigv4", "signingName": "support", - "name": "sigv4" + "signingRegion": "us-iso-east-1" } ] }, @@ -622,9 +631,9 @@ } }, "params": { - "UseFIPS": false, "Region": "aws-iso-global", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -635,9 +644,9 @@ } }, "params": { - "UseFIPS": true, "Region": "us-gov-west-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": true } }, { @@ -648,9 +657,9 @@ } }, "params": { - "UseFIPS": true, "Region": "us-gov-west-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": true } }, { @@ -661,9 +670,9 @@ } }, "params": { - "UseFIPS": false, "Region": "us-gov-west-1", - "UseDualStack": true + "UseDualStack": true, + "UseFIPS": false } }, { @@ -674,9 +683,9 @@ } }, "params": { - "UseFIPS": false, "Region": "us-gov-west-1", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -686,9 +695,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-isob-east-1", + "name": "sigv4", "signingName": "support", - "name": "sigv4" + "signingRegion": "us-isob-east-1" } ] }, @@ -696,9 +705,9 @@ } }, "params": { - "UseFIPS": false, "Region": "aws-iso-b-global", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -708,9 +717,9 @@ "properties": { "authSchemes": [ { - "signingRegion": "us-gov-west-1", + "name": "sigv4", 
"signingName": "support", - "name": "sigv4" + "signingRegion": "us-gov-west-1" } ] }, @@ -718,9 +727,9 @@ } }, "params": { - "UseFIPS": false, "Region": "aws-us-gov-global", - "UseDualStack": false + "UseDualStack": false, + "UseFIPS": false } }, { @@ -731,9 +740,9 @@ } }, "params": { - "UseFIPS": false, "Region": "us-east-1", "UseDualStack": false, + "UseFIPS": false, "Endpoint": "https://example.com" } }, @@ -743,9 +752,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, "Region": "us-east-1", "UseDualStack": false, + "UseFIPS": true, "Endpoint": "https://example.com" } }, @@ -755,9 +764,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, "Region": "us-east-1", "UseDualStack": true, + "UseFIPS": false, "Endpoint": "https://example.com" } } @@ -792,7 +801,7 @@ } ], "traits": { - "smithy.api#documentation": "Adds one or more attachments to an attachment set.
\nAn attachment set is a temporary container for attachments that you add to a case or\n case communication. The set is available for 1 hour after it's created. The\n expiryTime
returned in the response is when the set expires.
You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that doesn't have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
Adds one or more attachments to an attachment set.
\nAn attachment set is a temporary container for attachments that you add to a case or\n case communication. The set is available for 1 hour after it's created. The\n expiryTime
returned in the response is when the set expires.
You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that does not have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
One or more attachments to add to the set. You can add up to three attachments per\n set. The size limit is 5 MB per attachment.
\nIn the Attachment
object, use the data
parameter to specify\n the contents of the attachment file. In the previous request syntax, the value for\n data
appear as blob
, which is represented as a\n base64-encoded string. The value for fileName
is the name of the\n attachment, such as troubleshoot-screenshot.png
.
One or more attachments to add to the set. You can add up to three attachments per\n set. The size limit is 5 MB per attachment.
\nIn the Attachment
object, use the data
parameter to specify\n the contents of the attachment file. In the previous request syntax, the value for\n data
appear as blob
, which is represented as a\n base64-encoded string. The value for fileName
is the name of the\n attachment, such as troubleshoot-screenshot.png
.
Adds additional customer communication to an Amazon Web Services Support case. Use the caseId
\n parameter to identify the case to which to add communication. You can list a set of\n email addresses to copy on the communication by using the ccEmailAddresses
\n parameter. The communicationBody
value contains the text of the\n communication.
You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that doesn't have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
Adds additional customer communication to an Amazon Web Services Support case. Use the caseId
\n parameter to identify the case to which to add communication. You can list a set of\n email addresses to copy on the communication by using the ccEmailAddresses
\n parameter. The communicationBody
value contains the text of the\n communication.
You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that does not have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
The support case ID requested or returned in the call. The case ID is an alphanumeric\n string formatted as shown in this example:\n case-12345678910-2013-c4c1d2bf33c5cf47\n
" + "smithy.api#documentation": "The support case ID requested or returned in the call. The case ID is an\n alphanumeric string formatted as shown in this example:\n case-12345678910-2013-c4c1d2bf33c5cf47\n
" } }, "communicationBody": { @@ -884,7 +893,7 @@ "attachmentSetId": { "target": "com.amazonaws.support#AttachmentSetId", "traits": { - "smithy.api#documentation": "The ID of a set of one or more attachments for the communication to add to the case.\n Create the set by calling AddAttachmentsToSet\n
" + "smithy.api#documentation": "The ID of a set of one or more attachments for the communication to add to the case.\n Create the set by calling AddAttachmentsToSet\n
" } } } @@ -1070,7 +1079,7 @@ "caseId": { "target": "com.amazonaws.support#CaseId", "traits": { - "smithy.api#documentation": "The support case ID requested or returned in the call. The case ID is an alphanumeric\n string formatted as shown in this example:\n case-12345678910-2013-c4c1d2bf33c5cf47\n
" + "smithy.api#documentation": "The support case ID requested or returned in the call. The case ID is an\n alphanumeric string formatted as shown in this example:\n case-12345678910-2013-c4c1d2bf33c5cf47\n
" } }, "displayId": { @@ -1088,7 +1097,7 @@ "status": { "target": "com.amazonaws.support#Status", "traits": { - "smithy.api#documentation": "The status of the case.
\nValid values:
\n\n opened
\n
\n pending-customer-action
\n
\n reopened
\n
\n resolved
\n
\n unassigned
\n
\n work-in-progress
\n
The status of the case.
\nValid values:
\n\n opened
\n
\n pending-customer-action
\n
\n reopened
\n
\n resolved
\n
\n unassigned
\n
\n work-in-progress
\n
The language in which Amazon Web Services Support handles the case. Amazon Web Services Support\ncurrently supports English (\"en\") and Japanese (\"ja\"). You must specify the ISO 639-1 code for the language
parameter if you want support in that language.
The ISO 639-1 code for the language in which Amazon Web Services provides support. Amazon Web Services Support\n currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be\n passed explicitly for operations that take them.
" } } }, "traits": { - "smithy.api#documentation": "A JSON-formatted object that contains the metadata for a support case. It is contained\n in the response from a DescribeCases request. CaseDetails contains the following fields:
\n\n caseId - The support case ID requested\n or returned in the call. The case ID is an alphanumeric string formatted as\n shown in this example:\n case-12345678910-2013-c4c1d2bf33c5cf47.
\n\n categoryCode - The category of problem\n for the support case. Corresponds to the CategoryCode
values\n returned by a call to DescribeServices.
\n displayId - The identifier for the case\n on pages in the Amazon Web Services Support Center.
\n\n language - The language in which Amazon Web Services Support handles the case. Amazon Web Services Support\ncurrently supports English (\"en\") and Japanese (\"ja\"). You must specify the ISO 639-1 code for the language
parameter if you want support in that language.
\n nextToken - A resumption point for\n pagination.
\n\n recentCommunications - One or more Communication objects. Fields of these objects are\n attachments
, body
, caseId
,\n submittedBy
, and timeCreated
.
\n serviceCode - The identifier for the\n Amazon Web Services service that corresponds to the service code defined in the call to DescribeServices.
\n\n severityCode - The severity code\n assigned to the case. Contains one of the values returned by the call to DescribeSeverityLevels. The possible values are:\n low
, normal
, high
,\n urgent
, and critical
.
\n status - The status of the case in the\n Amazon Web Services Support Center. Valid values:
\n\n opened
\n
\n pending-customer-action
\n
\n reopened
\n
\n resolved
\n
\n unassigned
\n
\n work-in-progress
\n
\n subject - The subject line of the\n case.
\n\n submittedBy - The email address of the\n account that submitted the case.
\n\n timeCreated - The time the case was\n created, in ISO-8601 format.
\nA JSON-formatted object that contains the metadata for a support case. It is contained\n in the response from a DescribeCases request. CaseDetails contains the following fields:
\n\n caseId - The support case ID requested or\n returned in the call. The case ID is an alphanumeric string formatted as shown\n in this example:\n case-12345678910-2013-c4c1d2bf33c5cf47.
\n\n categoryCode - The category of problem for the\n support case. Corresponds to the CategoryCode
values returned by a call to\n DescribeServices.
\n displayId - The identifier for the case on pages\n in the Amazon Web Services Support Center.
\n\n language - The ISO 639-1 code for the language\n in which Amazon Web Services provides support. Amazon Web Services Support currently supports English (\"en\") and\n Japanese (\"ja\"). Language parameters must be passed explicitly for operations\n that take them.
\n\n nextToken - A resumption point for\n pagination.
\n\n recentCommunications - One or more Communication objects. Fields of these objects are\n attachments
, body
, caseId
,\n submittedBy
, and timeCreated
.
\n serviceCode - The identifier for the Amazon Web Services service\n that corresponds to the service code defined in the call to DescribeServices.
\n\n severityCode - The severity code assigned to the\n case. Contains one of the values returned by the call to DescribeSeverityLevels. The possible values are:\n low
, normal
, high
,\n urgent
, and critical
.
\n status - The status of the case in the Amazon Web Services Support Center. Valid values:
\n\n opened
\n
\n pending-customer-action
\n
\n reopened
\n
\n resolved
\n
\n unassigned
\n
\n work-in-progress
\n
\n subject - The subject line of the case.
\n\n submittedBy - The email address of the account\n that submitted the case.
\n\n timeCreated - The time the case was created, in\n ISO-8601 format.
\nThe support case ID requested or returned in the call. The case ID is an alphanumeric\n string formatted as shown in this example:\n case-12345678910-2013-c4c1d2bf33c5cf47\n
" + "smithy.api#documentation": "The support case ID requested or returned in the call. The case ID is an\n alphanumeric string formatted as shown in this example:\n case-12345678910-2013-c4c1d2bf33c5cf47\n
" } }, "body": { @@ -1265,17 +1268,11 @@ } }, "traits": { - "smithy.api#documentation": "A communication associated with a support case. The communication consists of the case\n ID, the message body, attachment information, the submitter of the communication, and\n the date and time of the communication.
" + "smithy.api#documentation": "A communication associated with a support case. The communication consists of the\n case ID, the message body, attachment information, the submitter of the communication,\n and the date and time of the communication.
" } }, "com.amazonaws.support#CommunicationBody": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 8000 - } - } + "type": "string" }, "com.amazonaws.support#CommunicationList": { "type": "list", @@ -1306,7 +1303,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a case in the Amazon Web Services Support Center. This operation is similar to how you create a case\n in the Amazon Web Services Support Center Create\n Case page.
\nThe Amazon Web Services Support API doesn't support requesting service limit increases. You can submit a\n service limit increase in the following ways:
\nSubmit a request from the Amazon Web Services Support Center Create Case page.
\nUse the Service Quotas RequestServiceQuotaIncrease operation.
\nA successful CreateCase
request returns an Amazon Web Services Support case number. You can use\n the DescribeCases operation and specify the case number to get\n existing Amazon Web Services Support cases. After you create a case, use the AddCommunicationToCase operation to add additional communication or\n attachments to an existing case.
The caseId
is separate from the displayId
that appears in\n the Amazon Web Services Support Center. Use the DescribeCases operation to get the displayId
.
You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that doesn't have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
Creates a case in the Amazon Web Services Support Center. This operation is similar to how you create a case\n in the Amazon Web Services Support Center Create\n Case page.
\nThe Amazon Web Services Support API doesn't support requesting service limit increases. You can submit a\n service limit increase in the following ways:
\nSubmit a request from the Amazon Web Services Support Center Create Case page.
\nUse the Service Quotas RequestServiceQuotaIncrease operation.
\nA successful CreateCase
request returns an Amazon Web Services Support case number. You can use\n the DescribeCases operation and specify the case number to get\n existing Amazon Web Services Support cases. After you create a case, use the AddCommunicationToCase operation to add additional communication or\n attachments to an existing case.
The caseId
is separate from the displayId
that appears in\n the Amazon Web Services Support Center. Use the DescribeCases operation to get the displayId
.
You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that does not have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
A value that indicates the urgency of the case. This value determines the response\n time according to your service level agreement with Amazon Web Services Support. You can use the DescribeSeverityLevels operation to get the possible values for\n severityCode
.
For more information, see SeverityLevel and Choosing a\n Severity in the Amazon Web Services Support User Guide.
\nThe availability of severity levels depends on the support plan for the\n Amazon Web Services account.
\nA value that indicates the urgency of the case. This value determines the response\n time according to your service level agreement with Amazon Web Services Support. You can use the DescribeSeverityLevels operation to get the possible values for\n severityCode
.
For more information, see SeverityLevel and Choosing a\n Severity in the Amazon Web Services Support User Guide.
\nThe availability of severity levels depends on the support plan for the\n Amazon Web Services account.
\nA list of email addresses that Amazon Web Services Support copies on case correspondence. Amazon Web Services Support\n identifies the account that creates the case when you specify your Amazon Web Services credentials in\n an HTTP POST method or use the Amazon Web Services SDKs.\n
" + "smithy.api#documentation": "A list of email addresses that Amazon Web Services Support copies on case correspondence. Amazon Web Services Support\n identifies the account that creates the case when you specify your Amazon Web Services credentials in an\n HTTP POST method or use the Amazon Web Services SDKs.\n
" } }, "language": { "target": "com.amazonaws.support#Language", "traits": { - "smithy.api#documentation": "The language in which Amazon Web Services Support handles the case. Amazon Web Services Support\ncurrently supports English (\"en\") and Japanese (\"ja\"). You must specify the ISO 639-1 code for the language
parameter if you want support in that language.
The language in which Amazon Web Services Support handles the case. You must specify the ISO 639-1\n code for the language
parameter if you want support in that language.\n Currently, English (\"en\") and Japanese (\"ja\") are supported.
The support case ID requested or returned in the call. The case ID is an alphanumeric\n string in the following format:\n case-12345678910-2013-c4c1d2bf33c5cf47\n
" + "smithy.api#documentation": "The support case ID requested or returned in the call. The case ID is an\n alphanumeric string in the following format:\n case-12345678910-2013-c4c1d2bf33c5cf47\n
" } } }, @@ -1407,7 +1404,7 @@ } ], "traits": { - "smithy.api#documentation": "Returns the attachment that has the specified ID. Attachments can include screenshots,\n error logs, or other files that describe your issue. Attachment IDs are generated by the\n case management system when you add an attachment to a case or case communication.\n Attachment IDs are returned in the AttachmentDetails objects that are\n returned by the DescribeCommunications operation.
\nYou must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that doesn't have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
Returns the attachment that has the specified ID. Attachments can include screenshots,\n error logs, or other files that describe your issue. Attachment IDs are generated by the\n case management system when you add an attachment to a case or case communication.\n Attachment IDs are returned in the AttachmentDetails objects that are\n returned by the DescribeCommunications operation.
\nYou must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that does not have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
This object includes the attachment content and file name.
\nIn the previous response syntax, the value for the data
parameter appears\n as blob
, which is represented as a base64-encoded string. The value for\n fileName
is the name of the attachment, such as\n troubleshoot-screenshot.png
.
This object includes the attachment content and file name.
\nIn the previous response syntax, the value for the data
parameter appears\n as blob
, which is represented as a base64-encoded string. The value for\n fileName
is the name of the attachment, such as\n troubleshoot-screenshot.png
.
Returns a list of cases that you specify by passing one or more case IDs. You can use\n the afterTime
and beforeTime
parameters to filter the cases by\n date. You can set values for the includeResolvedCases
and\n includeCommunications
parameters to specify how much information to\n return.
The response returns the following in JSON format:
\nOne or more CaseDetails data types.
\nOne or more nextToken
values, which specify where to paginate the\n returned records represented by the CaseDetails
objects.
Case data is available for 12 months after creation. If a case was created more than\n 12 months ago, a request might return an error.
\nYou must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that doesn't have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
Returns a list of cases that you specify by passing one or more case IDs. You can use\n the afterTime
and beforeTime
parameters to filter the cases by\n date. You can set values for the includeResolvedCases
and\n includeCommunications
parameters to specify how much information to\n return.
The response returns the following in JSON format:
\nOne or more CaseDetails data types.
\nOne or more nextToken
values, which specify where to paginate the\n returned records represented by the CaseDetails
objects.
Case data is available for 12 months after creation. If a case was created more than\n 12 months ago, a request might return an error.
\nYou must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that does not have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
The language in which Amazon Web Services Support handles the case. Amazon Web Services Support\ncurrently supports English (\"en\") and Japanese (\"ja\"). You must specify the ISO 639-1 code for the language
parameter if you want support in that language.
The ISO 639-1 code for the language in which Amazon Web Services provides support. Amazon Web Services Support\n currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be\n passed explicitly for operations that take them.
" } }, "includeCommunications": { @@ -1574,7 +1571,7 @@ } ], "traits": { - "smithy.api#documentation": "Returns communications and attachments for one or more support cases. Use the\n afterTime
and beforeTime
parameters to filter by date. You\n can use the caseId
parameter to restrict the results to a specific\n case.
Case data is available for 12 months after creation. If a case was created more than\n 12 months ago, a request for data might cause an error.
\nYou can use the maxResults
and nextToken
parameters to\n control the pagination of the results. Set maxResults
to the number of\n cases that you want to display on each page, and use nextToken
to specify\n the resumption of pagination.
You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that doesn't have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
Returns communications and attachments for one or more support cases. Use the\n afterTime
and beforeTime
parameters to filter by date. You\n can use the caseId
parameter to restrict the results to a specific\n case.
Case data is available for 12 months after creation. If a case was created more than\n 12 months ago, a request for data might cause an error.
\nYou can use the maxResults
and nextToken
parameters to\n control the pagination of the results. Set maxResults
to the number of\n cases that you want to display on each page, and use nextToken
to specify\n the resumption of pagination.
You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that does not have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
The support case ID requested or returned in the call. The case ID is an alphanumeric\n string formatted as shown in this example:\n case-12345678910-2013-c4c1d2bf33c5cf47\n
", + "smithy.api#documentation": "The support case ID requested or returned in the call. The case ID is an\n alphanumeric string formatted as shown in this example:\n case-12345678910-2013-c4c1d2bf33c5cf47\n
", "smithy.api#required": {} } }, @@ -1653,7 +1650,7 @@ } ], "traits": { - "smithy.api#documentation": "Returns the current list of Amazon Web Services services and a list of service categories for each\n service. You then use service names and categories in your CreateCase\n requests. Each Amazon Web Services service has its own set of categories.
\nThe service codes and category codes correspond to the values that appear in the\n Service and Category lists on the Amazon Web Services Support Center Create Case page. The values in those fields\n don't necessarily match the service codes and categories returned by the\n DescribeServices
operation. Always use the service codes and categories\n that the DescribeServices
operation returns, so that you have the most\n recent set of service and category codes.
You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that doesn't have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
Returns the current list of Amazon Web Services services and a list of service categories for each\n service. You then use service names and categories in your CreateCase\n requests. Each Amazon Web Services service has its own set of categories.
\nThe service codes and category codes correspond to the values that appear in the\n Service and Category lists on the Amazon Web Services Support Center Create Case page. The values in those fields\n don't necessarily match the service codes and categories returned by the\n DescribeServices
operation. Always use the service codes and categories\n that the DescribeServices
operation returns, so that you have the most\n recent set of service and category codes.
You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that does not have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
The language in which Amazon Web Services Support handles the case. Amazon Web Services Support\ncurrently supports English (\"en\") and Japanese (\"ja\"). You must specify the ISO 639-1 code for the language
parameter if you want support in that language.
The ISO 639-1 code for the language in which Amazon Web Services provides support. Amazon Web Services Support\n currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be\n passed explicitly for operations that take them.
" } } } @@ -1701,7 +1698,7 @@ } ], "traits": { - "smithy.api#documentation": "Returns the list of severity levels that you can assign to a support case. The\n severity level for a case is also a field in the CaseDetails data type\n that you include for a CreateCase request.
\nYou must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that doesn't have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
Returns the list of severity levels that you can assign to a support case. The severity\n level for a case is also a field in the CaseDetails data type that you\n include for a CreateCase request.
\nYou must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that does not have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
The language in which Amazon Web Services Support handles the case. Amazon Web Services Support\ncurrently supports English (\"en\") and Japanese (\"ja\"). You must specify the ISO 639-1 code for the language
parameter if you want support in that language.
The ISO 639-1 code for the language in which Amazon Web Services provides support. Amazon Web Services Support\n currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be\n passed explicitly for operations that take them.
" } } } @@ -1743,7 +1740,7 @@ } ], "traits": { - "smithy.api#documentation": "Returns the refresh status of the Trusted Advisor checks that have the specified check\n IDs. You can get the check IDs by calling the DescribeTrustedAdvisorChecks operation.
\nSome checks are refreshed automatically, and you can't return their refresh statuses\n by using the DescribeTrustedAdvisorCheckRefreshStatuses
operation. If you\n call this operation for these checks, you might see an\n InvalidParameterValue
error.
You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that doesn't have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
To call the Trusted Advisor operations in\nthe Amazon Web Services Support API, you must use the US East (N. Virginia) endpoint. Currently, the US West (Oregon) and Europe (Ireland) \nendpoints don't support the Trusted Advisor operations. For more information, see About the Amazon Web Services Support\nAPI in the Amazon Web Services Support User Guide.
" + "smithy.api#documentation": "Returns the refresh status of the Trusted Advisor checks that have the specified check\n IDs. You can get the check IDs by calling the DescribeTrustedAdvisorChecks operation.
\nSome checks are refreshed automatically, and you can't return their refresh statuses\n by using the DescribeTrustedAdvisorCheckRefreshStatuses
operation. If you\n call this operation for these checks, you might see an\n InvalidParameterValue
error.
You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that does not have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
The IDs of the Trusted Advisor checks to get the status.
\nIf you specify the check ID of a check that is automatically refreshed, you might\n see an InvalidParameterValue
error.
The IDs of the Trusted Advisor checks to get the status.
\nIf you specify the check ID of a check that is automatically refreshed, you might\n see an InvalidParameterValue
error.
Returns the results of the Trusted Advisor check that has the specified check ID. You\n can get the check IDs by calling the DescribeTrustedAdvisorChecks\n operation.
\nThe response contains a TrustedAdvisorCheckResult object, which\n contains these three objects:
\nIn addition, the response contains these fields:
\n\n status - The alert status of the check\n can be ok
(green), warning
(yellow),\n error
(red), or not_available
.
\n timestamp - The time of the last refresh\n of the check.
\n\n checkId - The unique identifier for the\n check.
\nYou must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that doesn't have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
To call the Trusted Advisor operations in\nthe Amazon Web Services Support API, you must use the US East (N. Virginia) endpoint. Currently, the US West (Oregon) and Europe (Ireland) \nendpoints don't support the Trusted Advisor operations. For more information, see About the Amazon Web Services Support\nAPI in the Amazon Web Services Support User Guide.
" + "smithy.api#documentation": "Returns the results of the Trusted Advisor check that has the specified check ID. You\n can get the check IDs by calling the DescribeTrustedAdvisorChecks\n operation.
\nThe response contains a TrustedAdvisorCheckResult object, which\n contains these three objects:
\nIn addition, the response contains these fields:
\n\n status - The alert status of the check\n can be ok
(green), warning
(yellow),\n error
(red), or not_available
.
\n timestamp - The time of the last refresh\n of the check.
\n\n checkId - The unique identifier for the\n check.
\nYou must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that does not have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
The ISO 639-1 code for the language that you want your check results to appear\n in.
\nThe Amazon Web Services Support API currently supports the following languages for Trusted Advisor:
\nChinese, Simplified - zh
\n
Chinese, Traditional - zh_TW
\n
English - en
\n
French - fr
\n
German - de
\n
Indonesian - id
\n
Italian - it
\n
Japanese - ja
\n
Korean - ko
\n
Portuguese, Brazilian - pt_BR
\n
Spanish - es
\n
The ISO 639-1 code for the language in which Amazon Web Services provides support. Amazon Web Services Support\n currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be\n passed explicitly for operations that take them.
" } } }, @@ -1839,7 +1836,7 @@ } ], "traits": { - "smithy.api#documentation": "Returns the results for the Trusted Advisor check summaries for the check IDs that you\n specified. You can get the check IDs by calling the DescribeTrustedAdvisorChecks operation.
\nThe response contains an array of TrustedAdvisorCheckSummary\n objects.
\nYou must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that doesn't have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
To call the Trusted Advisor operations in\nthe Amazon Web Services Support API, you must use the US East (N. Virginia) endpoint. Currently, the US West (Oregon) and Europe (Ireland) \nendpoints don't support the Trusted Advisor operations. For more information, see About the Amazon Web Services Support\nAPI in the Amazon Web Services Support User Guide.
" + "smithy.api#documentation": "Returns the results for the Trusted Advisor check summaries for the check IDs that you\n specified. You can get the check IDs by calling the DescribeTrustedAdvisorChecks operation.
\nThe response contains an array of TrustedAdvisorCheckSummary\n objects.
\nYou must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that does not have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
Returns information about all available Trusted Advisor checks, including the name, ID,\n category, description, and metadata. You must specify a language code.
\nThe response contains a TrustedAdvisorCheckDescription object for\n each check. You must set the Amazon Web Services Region to us-east-1.
\nYou must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support API.
\nIf you call the Amazon Web Services Support API from an account that doesn't have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the SubscriptionRequiredException
error\n message appears. For information about changing your support plan, see\n Amazon Web Services Support.
The names and descriptions for Trusted Advisor checks are subject to change. We\n recommend that you specify the check ID in your code to uniquely identify a\n check.
\nTo call the Trusted Advisor operations in\nthe Amazon Web Services Support API, you must use the US East (N. Virginia) endpoint. Currently, the US West (Oregon) and Europe (Ireland) \nendpoints don't support the Trusted Advisor operations. For more information, see About the Amazon Web Services Support\nAPI in the Amazon Web Services Support User Guide.
" + "smithy.api#documentation": "Returns information about all available Trusted Advisor checks, including the name, ID,\n category, description, and metadata. You must specify a language code. The Amazon Web Services Support API\n currently supports English (\"en\") and Japanese (\"ja\"). The response contains a TrustedAdvisorCheckDescription object for each check. You must set the\n Amazon Web Services Region to us-east-1.
\n \n \nYou must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that does not have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
The names and descriptions for Trusted Advisor checks are subject to change. We recommend\n that you specify the check ID in your code to uniquely identify a check.
\nThe ISO 639-1 code for the language that you want your checks to appear in.
\nThe Amazon Web Services Support API currently supports the following languages for Trusted Advisor:
\nChinese, Simplified - zh
\n
Chinese, Traditional - zh_TW
\n
English - en
\n
French - fr
\n
German - de
\n
Indonesian - id
\n
Italian - it
\n
Japanese - ja
\n
Korean - ko
\n
Portuguese, Brazilian - pt_BR
\n
Spanish - es
\n
The ISO 639-1 code for the language in which Amazon Web Services provides support. Amazon Web Services Support\n currently supports English (\"en\") and Japanese (\"ja\"). Language parameters must be\n passed explicitly for operations that take them.
", "smithy.api#required": {} } } @@ -2013,7 +2010,7 @@ } ], "traits": { - "smithy.api#documentation": "Refreshes the Trusted Advisor check that you specify using the check ID. You can get the\n check IDs by calling the DescribeTrustedAdvisorChecks\n operation.
\nSome checks are refreshed automatically. If you call the\n RefreshTrustedAdvisorCheck
operation to refresh them, you might see\n the InvalidParameterValue
error.
The response contains a TrustedAdvisorCheckRefreshStatus\n object.
\nYou must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that doesn't have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
To call the Trusted Advisor operations in\nthe Amazon Web Services Support API, you must use the US East (N. Virginia) endpoint. Currently, the US West (Oregon) and Europe (Ireland) \nendpoints don't support the Trusted Advisor operations. For more information, see About the Amazon Web Services Support\nAPI in the Amazon Web Services Support User Guide.
" + "smithy.api#documentation": "Refreshes the Trusted Advisor check that you specify using the check ID. You can get the\n check IDs by calling the DescribeTrustedAdvisorChecks\n operation.
\nSome checks are refreshed automatically. If you call the\n RefreshTrustedAdvisorCheck
operation to refresh them, you might see\n the InvalidParameterValue
error.
The response contains a TrustedAdvisorCheckRefreshStatus\n object.
\nYou must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that does not have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
The unique identifier for the Trusted Advisor check to refresh.
\nSpecifying the check ID of a check that is automatically refreshed causes an\n InvalidParameterValue
error.
The unique identifier for the Trusted Advisor check to refresh.
\nSpecifying the check ID of a check that is automatically refreshed\n causes an InvalidParameterValue
error.
Resolves a support case. This operation takes a caseId
and returns the\n initial and final state of the case.
You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that doesn't have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
Resolves a support case. This operation takes a caseId
and returns the\n initial and final state of the case.
You must have a Business, Enterprise On-Ramp, or Enterprise Support plan to use the Amazon Web Services Support\n API.
\nIf you call the Amazon Web Services Support API from an account that does not have a\n Business, Enterprise On-Ramp, or Enterprise Support plan, the\n SubscriptionRequiredException
error message appears. For\n information about changing your support plan, see Amazon Web Services Support.
The support case ID requested or returned in the call. The case ID is an alphanumeric\n string formatted as shown in this example:\n case-12345678910-2013-c4c1d2bf33c5cf47\n
" + "smithy.api#documentation": "The support case ID requested or returned in the call. The case ID is an\n alphanumeric string formatted as shown in this example:\n case-12345678910-2013-c4c1d2bf33c5cf47\n
" } } } @@ -2165,13 +2162,13 @@ "code": { "target": "com.amazonaws.support#SeverityLevelCode", "traits": { - "smithy.api#documentation": "The code for case severity level.
\nValid values: low
| normal
| high
|\n urgent
| critical
\n
The code for case severity level.
\nValid values: low
| normal
| high
|\n urgent
| critical
\n
The name of the severity level that corresponds to the severity level code.
\nThe values returned by the API are different from the values that appear in the\n Amazon Web Services Support Center. For example, the API uses the code low
, but the name\n appears as General guidance in Support Center.
The following are the API code names and how they appear in the console:
\n\n low
- General guidance
\n normal
- System impaired
\n high
- Production system impaired
\n urgent
- Production system down
\n critical
- Business-critical system down
For more information, see Choosing a\n severity in the Amazon Web Services Support User Guide.
" + "smithy.api#documentation": "The name of the severity level that corresponds to the severity level code.
\nThe values returned by the API are different from the values that appear in the\n Amazon Web Services Support Center. For example, the API uses the code low
, but the name\n appears as General guidance in Support Center.
The following are the API code names and how they appear in the console:
\n\n low
- General guidance
\n normal
- System impaired
\n high
- Production system impaired
\n urgent
- Production system down
\n critical
- Business-critical system down
For more information, see Choosing a\n severity in the Amazon Web Services Support User Guide.
" } } }, @@ -2218,12 +2215,12 @@ "costOptimizing": { "target": "com.amazonaws.support#TrustedAdvisorCostOptimizingSummary", "traits": { - "smithy.api#documentation": "The summary information about cost savings for a Trusted Advisor check that is in the Cost\n Optimizing category.
" + "smithy.api#documentation": "The summary information about cost savings for a Trusted Advisor check that is in the\n Cost Optimizing category.
" } } }, "traits": { - "smithy.api#documentation": "The container for summary information that relates to the category of the Trusted Advisor\n check.
" + "smithy.api#documentation": "The container for summary information that relates to the category of the Trusted Advisor check.
" } }, "com.amazonaws.support#TrustedAdvisorCheckDescription": { @@ -2260,7 +2257,7 @@ "metadata": { "target": "com.amazonaws.support#StringList", "traits": { - "smithy.api#documentation": "The column headings for the data returned by the Trusted Advisor check. The order of the\n headings corresponds to the order of the data in the Metadata element of the TrustedAdvisorResourceDetail\n for the check. Metadata contains all the data that is\n shown in the Excel download, even in those cases where the UI shows just summary data.\n
", + "smithy.api#documentation": "The column headings for the data returned by the Trusted Advisor check. The order of\n the headings corresponds to the order of the data in the Metadata element of the TrustedAdvisorResourceDetail\n for the check. Metadata contains all the data that is\n shown in the Excel download, even in those cases where the UI shows just summary data.\n
", "smithy.api#required": {} } } @@ -2288,7 +2285,7 @@ "status": { "target": "com.amazonaws.support#String", "traits": { - "smithy.api#documentation": "The status of the Trusted Advisor check for which a refresh has been requested:\n
\n\n none
- The check is not refreshed or the non-success\n status exceeds the timeout
\n enqueued
- The check refresh requests has entered the\n refresh queue
\n processing
- The check refresh request is picked up by the\n rule processing engine
\n success
- The check is successfully refreshed
\n abandoned
- The check refresh has failed
The status of the Trusted Advisor check for which a refresh has been requested:\n
\n\n none
- The check is not refreshed or the non-success status\n exceeds the timeout
\n enqueued
- The check refresh requests has entered the refresh\n queue
\n processing
- The check refresh request is picked up by the rule\n processing engine
\n success
- The check is successfully refreshed
\n abandoned
- The check refresh has failed
A summary of a Trusted Advisor check result, including the alert status, last refresh, and\n number of resources examined.
" + "smithy.api#documentation": "A summary of a Trusted Advisor check result, including the alert status, last refresh,\n and number of resources examined.
" } }, "com.amazonaws.support#TrustedAdvisorCheckSummaryList": { @@ -2466,7 +2463,7 @@ "target": "com.amazonaws.support#Boolean", "traits": { "smithy.api#default": false, - "smithy.api#documentation": "Specifies whether the Amazon Web Services resource was ignored by Trusted Advisor because it was marked as\n suppressed by the user.
" + "smithy.api#documentation": "Specifies whether the Amazon Web Services resource was ignored by Trusted Advisor because it was\n marked as suppressed by the user.
" } }, "metadata": { @@ -2502,7 +2499,7 @@ "target": "com.amazonaws.support#Long", "traits": { "smithy.api#default": 0, - "smithy.api#documentation": "The number of Amazon Web Services resources that were flagged (listed) by the Trusted Advisor check.
", + "smithy.api#documentation": "The number of Amazon Web Services resources that were flagged (listed) by the Trusted Advisor\n check.
", "smithy.api#required": {} } }, diff --git a/codegen/sdk/aws-models/transcribe.json b/codegen/sdk/aws-models/transcribe.json index dc75131b695..ae8acd625c4 100644 --- a/codegen/sdk/aws-models/transcribe.json +++ b/codegen/sdk/aws-models/transcribe.json @@ -58,7 +58,7 @@ } }, "traits": { - "smithy.api#documentation": "A time range, in milliseconds, between two points in your media file.
\nYou can use StartTime
and EndTime
to search a custom\n segment. For example, setting StartTime
to 10000 and EndTime
\n to 50000 only searches for your specified criteria in the audio contained between the\n 10,000 millisecond mark and the 50,000 millisecond mark of your media file. You must use\n StartTime
and EndTime
as a set; that is, if you include\n one, you must include both.
You can use also First
to search from the start of the audio until the\n time that you specify, or Last
to search from the time that you specify\n until the end of the audio. For example, setting First
to 50000 only\n searches for your specified criteria in the audio contained between the start of the\n media file to the 50,000 millisecond mark. You can use First
and\n Last
independently of each other.
If you prefer to use percentage instead of milliseconds, see .
" + "smithy.api#documentation": "A time range, in milliseconds, between two points in your media file.
\nYou can use StartTime
and EndTime
to search a custom\n segment. For example, setting StartTime
to 10000 and EndTime
\n to 50000 only searches for your specified criteria in the audio contained between the\n 10,000 millisecond mark and the 50,000 millisecond mark of your media file. You must use\n StartTime
and EndTime
as a set; that is, if you include\n one, you must include both.
You can also use <code>First</code>
to search from the start of the audio until the\n time that you specify, or Last
to search from the time that you specify\n until the end of the audio. For example, setting First
to 50000 only\n searches for your specified criteria in the audio contained between the start of the\n media file to the 50,000 millisecond mark. You can use First
and\n Last
independently of each other.
If you prefer to use percentage instead of milliseconds, see .
" } }, "com.amazonaws.transcribe#BadRequestException": { @@ -153,13 +153,13 @@ "CallAnalyticsJobStatus": { "target": "com.amazonaws.transcribe#CallAnalyticsJobStatus", "traits": { - "smithy.api#documentation": "Provides the status of the specified Call Analytics job.
\nIf the status is COMPLETED
, the job is finished and you can find the\n results at the location specified in TranscriptFileUri
(or\n RedactedTranscriptFileUri
, if you requested transcript redaction). If\n the status is FAILED
, FailureReason
provides details on why\n your transcription job failed.
Provides the status of the specified Call Analytics job.
\nIf the status is COMPLETED
, the job is finished and you can find the\n results at the location specified in TranscriptFileUri
(or\n RedactedTranscriptFileUri
, if you requested transcript redaction). If\n the status is FAILED
, FailureReason
provides details on why\n your transcription job failed.
The language code used to create your Call Analytics job. For a list of supported\n languages and their associated language codes, refer to the Supported languages\n table.
\nIf you don't know the language spoken in your media file, you can omit this field and\n let Amazon Transcribe automatically identify the language of your media. To improve the\n accuracy of language identification, you can include several language codes and Amazon Transcribe chooses the closest match for your transcription.
" + "smithy.api#documentation": "The language code used to create your Call Analytics job. For a list of supported\n languages and their associated language codes, refer to the Supported languages\n table.
\nIf you don't know the language spoken in your media file, you can omit this field and\n let Amazon Transcribe automatically identify the language of your media. To improve the\n accuracy of language identification, you can include several language codes and Amazon Transcribe chooses the closest match for your transcription.
" } }, "MediaSampleRateHertz": { @@ -186,25 +186,25 @@ "StartTime": { "target": "com.amazonaws.transcribe#DateTime", "traits": { - "smithy.api#documentation": "The date and time the specified Call Analytics job began processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.789000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified Call Analytics job began processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.789000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified Call Analytics job request was made.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified Call Analytics job request was made.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified Call Analytics job finished processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:33:13.922000-07:00
represents a transcription job\n that started processing at 12:33 PM UTC-7 on May 4, 2022.
The date and time the specified Call Analytics job finished processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:33:13.922000-07:00
represents a transcription job\n that started processing at 12:33 PM UTC-7 on May 4, 2022.
If CallAnalyticsJobStatus
is FAILED
,\n FailureReason
contains information about why the Call Analytics job\n request failed.
The FailureReason
field contains one of the following values:
\n Unsupported media format
.
The media format specified in MediaFormat
isn't valid. Refer to\n MediaFormat for a list of supported\n formats.
\n The media format provided does not match the detected media\n format
.
The media format specified in MediaFormat
doesn't match the\n format of the input file. Check the media format of your media file and correct\n the specified value.
\n Invalid sample rate for audio file
.
The sample rate specified in MediaSampleRateHertz
isn't valid.\n The sample rate must be between 8,000 and 48,000 hertz.
\n The sample rate provided does not match the detected sample\n rate
.
The sample rate specified in MediaSampleRateHertz
doesn't match\n the sample rate detected in your input media file. Check the sample rate of your\n media file and correct the specified value.
\n Invalid file size: file size too large
.
The size of your media file is larger than what Amazon Transcribe can\n process. For more information, refer to Guidelines and\n quotas.
\n\n Invalid number of channels: number of channels too large
.
Your audio contains more channels than Amazon Transcribe is able to process.\n For more information, refer to Guidelines and\n quotas.
\nIf CallAnalyticsJobStatus
is FAILED
,\n FailureReason
contains information about why the Call Analytics job\n request failed.
The FailureReason
field contains one of the following values:
\n Unsupported media format
.
The media format specified in MediaFormat
isn't valid. Refer to\n MediaFormat for a list of supported\n formats.
\n The media format provided does not match the detected media\n format
.
The media format specified in MediaFormat
doesn't match the\n format of the input file. Check the media format of your media file and correct\n the specified value.
\n Invalid sample rate for audio file
.
The sample rate specified in MediaSampleRateHertz
isn't valid.\n The sample rate must be between 8,000 and 48,000 hertz.
\n The sample rate provided does not match the detected sample\n rate
.
The sample rate specified in MediaSampleRateHertz
doesn't match\n the sample rate detected in your input media file. Check the sample rate of your\n media file and correct the specified value.
\n Invalid file size: file size too large
.
The size of your media file is larger than what Amazon Transcribe can\n process. For more information, refer to Guidelines and\n quotas.
\n\n Invalid number of channels: number of channels too large
.
Your audio contains more channels than Amazon Transcribe is able to process.\n For more information, refer to Guidelines and\n quotas.
\nThe confidence score associated with the language identified in your media\n file.
\nConfidence scores are values between 0 and 1; a larger value indicates a higher\n probability that the identified language correctly matches the language spoken in your\n media.
" + "smithy.api#documentation": "The confidence score associated with the language identified in your media\n file.
\nConfidence scores are values between 0 and 1; a larger value indicates a higher\n probability that the identified language correctly matches the language spoken in your\n media.
" } }, "Settings": { @@ -233,7 +233,7 @@ } }, "traits": { - "smithy.api#documentation": "Provides detailed information about a Call Analytics job.
\nTo view the job's status, refer to CallAnalyticsJobStatus
. If the status\n is COMPLETED
, the job is finished. You can find your completed transcript\n at the URI specified in TranscriptFileUri
. If the status is\n FAILED
, FailureReason
provides details on why your\n transcription job failed.
If you enabled personally identifiable information (PII) redaction, the redacted\n transcript appears at the location specified in\n RedactedTranscriptFileUri
.
If you chose to redact the audio in your media file, you can find your redacted media\n file at the location specified in the RedactedMediaFileUri
field of your\n response.
Provides detailed information about a Call Analytics job.
\nTo view the job's status, refer to CallAnalyticsJobStatus
. If the status\n is COMPLETED
, the job is finished. You can find your completed transcript\n at the URI specified in TranscriptFileUri
. If the status is\n FAILED
, FailureReason
provides details on why your\n transcription job failed.
If you enabled personally identifiable information (PII) redaction, the redacted\n transcript appears at the location specified in\n RedactedTranscriptFileUri
.
If you chose to redact the audio in your media file, you can find your redacted media\n file at the location specified in the RedactedMediaFileUri
field of your\n response.
The name of the custom vocabulary filter you want to include in your Call Analytics\n transcription request. Custom vocabulary filter names are case sensitive.
\nNote that if you include VocabularyFilterName
in your request, you must\n also include VocabularyFilterMethod
.
The name of the custom vocabulary filter you want to include in your Call Analytics\n transcription request. Custom vocabulary filter names are case sensitive.
\nNote that if you include VocabularyFilterName
in your request, you must\n also include VocabularyFilterMethod
.
Specify how you want your custom vocabulary filter applied to your transcript.
\nTo replace words with ***
, choose mask
.
To delete words, choose remove
.
To flag words without changing them, choose tag
.
Specify how you want your custom vocabulary filter applied to your transcript.
\nTo replace words with ***
, choose mask
.
To delete words, choose remove
.
To flag words without changing them, choose tag
.
The name of the custom language model you want to use when processing your Call\n Analytics job. Note that custom language model names are case sensitive.
\nThe language of the specified custom language model must match the language code that\n you specify in your transcription request. If the languages don't match, the custom\n language model isn't applied. There are no errors or warnings associated with a language\n mismatch.
" + "smithy.api#documentation": "The name of the custom language model you want to use when processing your Call\n Analytics job. Note that custom language model names are case sensitive.
\nThe language of the specified custom language model must match the language code that\n you specify in your transcription request. If the languages don't match, the custom\n language model isn't applied. There are no errors or warnings associated with a language\n mismatch.
" } }, "ContentRedaction": { @@ -279,13 +279,13 @@ "LanguageOptions": { "target": "com.amazonaws.transcribe#LanguageOptions", "traits": { - "smithy.api#documentation": "You can specify two or more language codes that represent the languages you think may\n be present in your media. Including more than five is not recommended. If you're unsure\n what languages are present, do not include this parameter.
\nIncluding language options can improve the accuracy of language identification.
\nFor a list of languages supported with Call Analytics, refer to the Supported\n languages table.
\nTo transcribe speech in Modern Standard Arabic (ar-SA
), your media file\n must be encoded at a sample rate of 16,000 Hz or higher.
You can specify two or more language codes that represent the languages you think may\n be present in your media. Including more than five is not recommended. If you're unsure\n what languages are present, do not include this parameter.
\nIncluding language options can improve the accuracy of language identification.
\nFor a list of languages supported with Call Analytics, refer to the Supported\n languages table.
\nTo transcribe speech in Modern Standard Arabic (ar-SA
), your media file\n must be encoded at a sample rate of 16,000 Hz or higher.
If using automatic language identification in your request and you want to apply a\n custom language model, a custom vocabulary, or a custom vocabulary filter, include\n LanguageIdSettings
with the relevant sub-parameters\n (VocabularyName
, LanguageModelName
, and\n VocabularyFilterName
).
\n LanguageIdSettings
supports two to five language codes. Each language\n code you include can have an associated custom language model, custom vocabulary, and\n custom vocabulary filter. The language codes that you specify must match the languages\n of the associated custom language models, custom vocabularies, and custom vocabulary\n filters.
It's recommended that you include LanguageOptions
when using\n LanguageIdSettings
to ensure that the correct language dialect is\n identified. For example, if you specify a custom vocabulary that is in\n en-US
but Amazon Transcribe determines that the language spoken in\n your media is en-AU
, your custom vocabulary is not\n applied to your transcription. If you include LanguageOptions
and include\n en-US
as the only English language dialect, your custom vocabulary\n is applied to your transcription.
If you want to include a custom language model, custom vocabulary, or custom\n vocabulary filter with your request but do not want to\n use automatic language identification, use instead the parameter with the\n
LanguageModelName
, VocabularyName
, or\n VocabularyFilterName
sub-parameters.
For a list of languages supported with Call Analytics, refer to Supported languages and \n language-specific features.
" + "smithy.api#documentation": "If using automatic language identification in your request and you want to apply a\n custom language model, a custom vocabulary, or a custom vocabulary filter, include\n LanguageIdSettings
with the relevant sub-parameters\n (VocabularyName
, LanguageModelName
, and\n VocabularyFilterName
).
\n LanguageIdSettings
supports two to five language codes. Each language\n code you include can have an associated custom language model, custom vocabulary, and\n custom vocabulary filter. The language codes that you specify must match the languages\n of the associated custom language models, custom vocabularies, and custom vocabulary\n filters.
It's recommended that you include LanguageOptions
when using\n LanguageIdSettings
to ensure that the correct language dialect is\n identified. For example, if you specify a custom vocabulary that is in\n en-US
but Amazon Transcribe determines that the language spoken in\n your media is en-AU
, your custom vocabulary is not\n applied to your transcription. If you include LanguageOptions
and include\n en-US
as the only English language dialect, your custom vocabulary\n is applied to your transcription.
If you want to include a custom language model, custom vocabulary, or custom\n vocabulary filter with your request but do not want to\n use automatic language identification, use instead the parameter with the\n
LanguageModelName
, VocabularyName
, or\n VocabularyFilterName
sub-parameters.
For a list of languages supported with Call Analytics, refer to Supported languages and \n language-specific features.
" } } }, @@ -340,19 +340,19 @@ "CreationTime": { "target": "com.amazonaws.transcribe#DateTime", "traits": { - "smithy.api#documentation": "The date and time the specified Call Analytics job request was made.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified Call Analytics job request was made.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time your Call Analytics job began processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.789000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time your Call Analytics job began processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.789000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified Call Analytics job finished processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:33:13.922000-07:00
represents a transcription job\n that started processing at 12:33 PM UTC-7 on May 4, 2022.
The date and time the specified Call Analytics job finished processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:33:13.922000-07:00
represents a transcription job\n that started processing at 12:33 PM UTC-7 on May 4, 2022.
Provides the status of your Call Analytics job.
\nIf the status is COMPLETED
, the job is finished and you can find the\n results at the location specified in TranscriptFileUri
(or\n RedactedTranscriptFileUri
, if you requested transcript redaction). If\n the status is FAILED
, FailureReason
provides details on why\n your transcription job failed.
Provides the status of your Call Analytics job.
\nIf the status is COMPLETED
, the job is finished and you can find the\n results at the location specified in TranscriptFileUri
(or\n RedactedTranscriptFileUri
, if you requested transcript redaction). If\n the status is FAILED
, FailureReason
provides details on why\n your transcription job failed.
The date and time the specified Call Analytics category was created.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The date and time the specified Call Analytics category was created.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The date and time the specified Call Analytics category was last updated.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-05T12:45:32.691000-07:00
represents 12:45 PM UTC-7 on May\n 5, 2022.
The date and time the specified Call Analytics category was last updated.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-05T12:45:32.691000-07:00
represents 12:45 PM UTC-7 on May\n 5, 2022.
Specify if you want only a redacted transcript, or if you want a redacted and an\n unredacted transcript.
\nWhen you choose redacted
\n Amazon Transcribe creates only a redacted transcript.
When you choose redacted_and_unredacted
\n Amazon Transcribe creates a redacted and an unredacted transcript (as two separate\n files).
Specify if you want only a redacted transcript, or if you want a redacted and an\n unredacted transcript.
\nWhen you choose redacted
\n Amazon Transcribe creates only a redacted transcript.
When you choose redacted_and_unredacted
\n Amazon Transcribe creates a redacted and an unredacted transcript (as two separate\n files).
Creates a new Call Analytics category.
\nAll categories are automatically applied to your Call Analytics transcriptions. Note that in\n order to apply categories to your transcriptions, you must create them before submitting your\n transcription request, as categories cannot be applied retroactively.
\nWhen creating a new category, you can use the InputType
parameter to \n label the category as a batch category (POST_CALL
) or a streaming category \n (REAL_TIME
). Batch categories can only be applied to batch transcriptions and \n streaming categories can only be applied to streaming transcriptions. If you do not include \n InputType
, your category is created as a batch category by default.
Call Analytics categories are composed of rules. For each category, you must create\n between 1 and 20 rules. Rules can include these parameters: , , , and .
\nTo update an existing category, see .
\nTo learn more about Call Analytics categories, see Creating categories for batch\n transcriptions and Creating categories for\n streaming transcriptions.
", + "smithy.api#documentation": "Creates a new Call Analytics category.
\nAll categories are automatically applied to your Call Analytics transcriptions. Note that in\n order to apply categories to your transcriptions, you must create them before submitting your\n transcription request, as categories cannot be applied retroactively.
\nWhen creating a new category, you can use the InputType
parameter to \n label the category as a batch category (POST_CALL
) or a streaming category \n (REAL_TIME
). Batch categories can only be applied to batch transcriptions and \n streaming categories can only be applied to streaming transcriptions. If you do not include \n InputType
, your category is created as a batch category by default.
Call Analytics categories are composed of rules. For each category, you must create\n between 1 and 20 rules. Rules can include these parameters: , , , and .
\nTo update an existing category, see .
\nTo learn more about Call Analytics categories, see Creating categories for batch\n transcriptions and Creating categories for\n streaming transcriptions.
", "smithy.api#http": { "method": "PUT", "uri": "/callanalyticscategories/{CategoryName}", @@ -553,7 +553,7 @@ "CategoryName": { "target": "com.amazonaws.transcribe#CategoryName", "traits": { - "smithy.api#documentation": "A unique name, chosen by you, for your Call Analytics category. It's helpful to use a\n detailed naming system that will make sense to you in the future. For example, it's\n better to use sentiment-positive-last30seconds
for a category over a\n generic name like test-category
.
Category names are case sensitive.
", + "smithy.api#documentation": "A unique name, chosen by you, for your Call Analytics category. It's helpful to use a\n detailed naming system that will make sense to you in the future. For example, it's\n better to use sentiment-positive-last30seconds
for a category over a\n generic name like test-category
.
Category names are case sensitive.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -568,7 +568,7 @@ "InputType": { "target": "com.amazonaws.transcribe#InputType", "traits": { - "smithy.api#documentation": "Choose whether you want to create a streaming or a batch category for your Call Analytics \n transcription.
\nSpecifying POST_CALL
assigns your category to batch transcriptions; \n categories with this input type cannot be applied to streaming (real-time) \n transcriptions.
Specifying REAL_TIME
assigns your category to streaming transcriptions; \n categories with this input type cannot be applied to batch (post-call)\n transcriptions.
If you do not include InputType
, your category is created as a batch \n category by default.
Choose whether you want to create a streaming or a batch category for your Call Analytics \n transcription.
\nSpecifying POST_CALL
assigns your category to batch transcriptions; \n categories with this input type cannot be applied to streaming (real-time) \n transcriptions.
Specifying REAL_TIME
assigns your category to streaming transcriptions; \n categories with this input type cannot be applied to batch (post-call)\n transcriptions.
If you do not include InputType
, your category is created as a batch \n category by default.
Creates a new custom language model.
\nWhen creating a new custom language model, you must specify:
\nIf you want a Wideband (audio sample rates over 16,000 Hz) or Narrowband\n (audio sample rates under 16,000 Hz) base model
\nThe location of your training and tuning files (this must be an Amazon S3 URI)
\nThe language of your model
\nA unique name for your model
\nCreates a new custom language model.
\nWhen creating a new custom language model, you must specify:
\nIf you want a Wideband (audio sample rates over 16,000 Hz) or Narrowband\n (audio sample rates under 16,000 Hz) base model
\nThe location of your training and tuning files (this must be an Amazon S3 URI)
\nThe language of your model
\nA unique name for your model
\nThe language code that represents the language of your model. Each custom language\n model must contain terms in only one language, and the language you select for your\n custom language model must match the language of your training and tuning data.
\nFor a list of supported languages and their associated language codes, refer to the\n Supported languages table. Note that\n US English\n (en-US
) is the only language supported with Amazon Transcribe\n Medical.
A custom language model can only be used to transcribe files in the same language as\n the model. For example, if you create a custom language model using US English\n (en-US
), you can only apply this model to files that contain English\n audio.
The language code that represents the language of your model. Each custom language\n model must contain terms in only one language, and the language you select for your\n custom language model must match the language of your training and tuning data.
\nFor a list of supported languages and their associated language codes, refer to the\n Supported languages table. Note that\n US English\n (en-US
) is the only language supported with Amazon Transcribe\n Medical.
A custom language model can only be used to transcribe files in the same language as\n the model. For example, if you create a custom language model using US English\n (en-US
), you can only apply this model to files that contain English\n audio.
The Amazon Transcribe standard language model, or base model, used to create your\n custom language model. Amazon Transcribe offers two options for base models: Wideband\n and Narrowband.
\nIf the audio you want to transcribe has a sample rate of 16,000 Hz or greater, choose\n WideBand
. To transcribe audio with a sample rate less than 16,000 Hz,\n choose NarrowBand
.
The Amazon Transcribe standard language model, or base model, used to create your\n custom language model. Amazon Transcribe offers two options for base models: Wideband\n and Narrowband.
\nIf the audio you want to transcribe has a sample rate of 16,000 Hz or greater, choose\n WideBand
. To transcribe audio with a sample rate less than 16,000 Hz,\n choose NarrowBand
.
A unique name, chosen by you, for your custom language model.
\nThis name is case sensitive, cannot contain spaces, and must be unique within an\n Amazon Web Services account. If you try to create a new custom language model with\n the same name as an existing custom language model, you get a\n ConflictException
error.
A unique name, chosen by you, for your custom language model.
\nThis name is case sensitive, cannot contain spaces, and must be unique within an\n Amazon Web Services account. If you try to create a new custom language model with\n the same name as an existing custom language model, you get a\n ConflictException
error.
Contains the Amazon S3 location of the training data you want to use to create\n a new custom language model, and permissions to access this location.
\nWhen using InputDataConfig
, you must include these sub-parameters:\n S3Uri
, which is the Amazon S3 location of your training data,\n and DataAccessRoleArn
, which is the Amazon Resource Name (ARN) of the role\n that has permission to access your specified Amazon S3 location. You can\n optionally include TuningDataS3Uri
, which is the Amazon S3 location\n of your tuning data. If you specify different Amazon S3 locations for training\n and tuning data, the ARN you use must have permissions to access both locations.
Contains the Amazon S3 location of the training data you want to use to create\n a new custom language model, and permissions to access this location.
\nWhen using InputDataConfig
, you must include these sub-parameters:\n S3Uri
, which is the Amazon S3 location of your training data,\n and DataAccessRoleArn
, which is the Amazon Resource Name (ARN) of the role\n that has permission to access your specified Amazon S3 location. You can\n optionally include TuningDataS3Uri
, which is the Amazon S3 location\n of your tuning data. If you specify different Amazon S3 locations for training\n and tuning data, the ARN you use must have permissions to access both locations.
Adds one or more custom tags, each in the form of a key:value pair, to a new custom\n language model at the time you create this new model.
\nTo learn more about using tags with Amazon Transcribe, refer to Tagging\n resources.
" + "smithy.api#documentation": "Adds one or more custom tags, each in the form of a key:value pair, to a new custom\n language model at the time you create this new model.
\nTo learn more about using tags with Amazon Transcribe, refer to Tagging\n resources.
" } } } @@ -713,7 +713,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a new custom medical vocabulary.
\nBefore creating a new custom medical vocabulary, you must first upload a text file\n that contains your new entries, phrases, and terms into an Amazon S3 bucket.\n Note that this differs from , where you can\n include a list of terms within your request using the Phrases
flag;\n CreateMedicalVocabulary
does not support the Phrases
\n flag.
Each language has a character set that contains all allowed characters for that\n specific language. If you use unsupported characters, your custom vocabulary request\n fails. Refer to Character Sets for Custom Vocabularies to get the character set for your\n language.
\nFor more information, see Custom\n vocabularies.
", + "smithy.api#documentation": "Creates a new custom medical vocabulary.
\nBefore creating a new custom medical vocabulary, you must first upload a text file\n that contains your new entries, phrases, and terms into an Amazon S3 bucket.\n Note that this differs from , where you can\n include a list of terms within your request using the Phrases
flag;\n CreateMedicalVocabulary
does not support the Phrases
\n flag.
Each language has a character set that contains all allowed characters for that\n specific language. If you use unsupported characters, your custom vocabulary request\n fails. Refer to Character Sets for Custom Vocabularies to get the character set for your\n language.
\nFor more information, see Custom\n vocabularies.
", "smithy.api#http": { "method": "PUT", "uri": "/medicalvocabularies/{VocabularyName}", @@ -727,7 +727,7 @@ "VocabularyName": { "target": "com.amazonaws.transcribe#VocabularyName", "traits": { - "smithy.api#documentation": "A unique name, chosen by you, for your new custom medical vocabulary.
\nThis name is case sensitive, cannot contain spaces, and must be unique within an\n Amazon Web Services account. If you try to create a new custom medical vocabulary\n with the same name as an existing custom medical vocabulary, you get a\n ConflictException
error.
A unique name, chosen by you, for your new custom medical vocabulary.
\nThis name is case sensitive, cannot contain spaces, and must be unique within an\n Amazon Web Services account. If you try to create a new custom medical vocabulary\n with the same name as an existing custom medical vocabulary, you get a\n ConflictException
error.
The Amazon S3 location (URI) of the text file that contains your custom\n medical vocabulary. The URI must be in the same Amazon Web Services Region as the\n resource you're calling.
\nHere's an example URI path:\n s3://DOC-EXAMPLE-BUCKET/my-vocab-file.txt
\n
The Amazon S3 location (URI) of the text file that contains your custom\n medical vocabulary. The URI must be in the same Amazon Web Services Region as the\n resource you're calling.
\nHere's an example URI path:\n s3://DOC-EXAMPLE-BUCKET/my-vocab-file.txt
\n
Adds one or more custom tags, each in the form of a key:value pair, to a new custom\n medical vocabulary at the time you create this new custom vocabulary.
\nTo learn more about using tags with Amazon Transcribe, refer to Tagging\n resources.
" + "smithy.api#documentation": "Adds one or more custom tags, each in the form of a key:value pair, to a new custom\n medical vocabulary at the time you create this new custom vocabulary.
\nTo learn more about using tags with Amazon Transcribe, refer to Tagging\n resources.
" } } } @@ -778,7 +778,7 @@ "LastModifiedTime": { "target": "com.amazonaws.transcribe#DateTime", "traits": { - "smithy.api#documentation": "The date and time you created your custom medical vocabulary.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The date and time you created your custom medical vocabulary.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
Creates a new custom vocabulary.
\nWhen creating a new custom vocabulary, you can either upload a text file that contains\n your new entries, phrases, and terms into an Amazon S3 bucket and include the\n URI in your request. Or you can include a list of terms directly in your request using\n the Phrases
flag.
Each language has a character set that contains all allowed characters for that\n specific language. If you use unsupported characters, your custom vocabulary request\n fails. Refer to Character Sets for Custom Vocabularies to get the character set for your\n language.
\nFor more information, see Custom\n vocabularies.
", + "smithy.api#documentation": "Creates a new custom vocabulary.
\nWhen creating a new custom vocabulary, you can either upload a text file that contains\n your new entries, phrases, and terms into an Amazon S3 bucket and include the\n URI in your request. Or you can include a list of terms directly in your request using\n the Phrases
flag.
Each language has a character set that contains all allowed characters for that\n specific language. If you use unsupported characters, your custom vocabulary request\n fails. Refer to Character Sets for Custom Vocabularies to get the character set for your\n language.
\nFor more information, see Custom\n vocabularies.
", "smithy.api#http": { "method": "PUT", "uri": "/vocabularies/{VocabularyName}", @@ -843,7 +843,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a new custom vocabulary filter.
\nYou can use custom vocabulary filters to mask, delete, or flag specific words from\n your transcript. Custom vocabulary filters are commonly used to mask profanity in\n transcripts.
\nEach language has a character set that contains all allowed characters for that\n specific language. If you use unsupported characters, your custom vocabulary filter\n request fails. Refer to Character Sets for Custom\n Vocabularies to get the character set for your language.
\nFor more information, see Vocabulary\n filtering.
", + "smithy.api#documentation": "Creates a new custom vocabulary filter.
\nYou can use custom vocabulary filters to mask, delete, or flag specific words from\n your transcript. Custom vocabulary filters are commonly used to mask profanity in\n transcripts.
\nEach language has a character set that contains all allowed characters for that\n specific language. If you use unsupported characters, your custom vocabulary filter\n request fails. Refer to Character Sets for Custom\n Vocabularies to get the character set for your language.
\nFor more information, see Vocabulary\n filtering.
", "smithy.api#http": { "method": "POST", "uri": "/vocabularyFilters/{VocabularyFilterName}", @@ -857,7 +857,7 @@ "VocabularyFilterName": { "target": "com.amazonaws.transcribe#VocabularyFilterName", "traits": { - "smithy.api#documentation": "A unique name, chosen by you, for your new custom vocabulary filter.
\nThis name is case sensitive, cannot contain spaces, and must be unique within an\n Amazon Web Services account. If you try to create a new custom vocabulary filter with\n the same name as an existing custom vocabulary filter, you get a\n ConflictException
error.
A unique name, chosen by you, for your new custom vocabulary filter.
\nThis name is case sensitive, cannot contain spaces, and must be unique within an\n Amazon Web Services account. If you try to create a new custom vocabulary filter with\n the same name as an existing custom vocabulary filter, you get a\n ConflictException
error.
The language code that represents the language of the entries in your vocabulary\n filter. Each custom vocabulary filter must contain terms in only one language.
\nA custom vocabulary filter can only be used to transcribe files in the same language\n as the filter. For example, if you create a custom vocabulary filter using US English\n (en-US
), you can only apply this filter to files that contain English\n audio.
For a list of supported languages and their associated language codes, refer to the\n Supported languages table.
", + "smithy.api#documentation": "The language code that represents the language of the entries in your vocabulary\n filter. Each custom vocabulary filter must contain terms in only one language.
\nA custom vocabulary filter can only be used to transcribe files in the same language\n as the filter. For example, if you create a custom vocabulary filter using US English\n (en-US
), you can only apply this filter to files that contain English\n audio.
For a list of supported languages and their associated language codes, refer to the\n Supported languages table.
", "smithy.api#required": {} } }, "Words": { "target": "com.amazonaws.transcribe#Words", "traits": { - "smithy.api#documentation": "Use this parameter if you want to create your custom vocabulary filter by including\n all desired terms, as comma-separated values, within your request. The other option for\n creating your vocabulary filter is to save your entries in a text file and upload them\n to an Amazon S3 bucket, then specify the location of your file using the\n VocabularyFilterFileUri
parameter.
Note that if you include Words
in your request, you cannot use\n VocabularyFilterFileUri
; you must choose one or the other.
Each language has a character set that contains all allowed characters for that\n specific language. If you use unsupported characters, your custom vocabulary filter\n request fails. Refer to Character Sets for Custom\n Vocabularies to get the character set for your language.
" + "smithy.api#documentation": "Use this parameter if you want to create your custom vocabulary filter by including\n all desired terms, as comma-separated values, within your request. The other option for\n creating your vocabulary filter is to save your entries in a text file and upload them\n to an Amazon S3 bucket, then specify the location of your file using the\n VocabularyFilterFileUri
parameter.
Note that if you include Words
in your request, you cannot use\n VocabularyFilterFileUri
; you must choose one or the other.
Each language has a character set that contains all allowed characters for that\n specific language. If you use unsupported characters, your custom vocabulary filter\n request fails. Refer to Character Sets for Custom\n Vocabularies to get the character set for your language.
" } }, "VocabularyFilterFileUri": { "target": "com.amazonaws.transcribe#Uri", "traits": { - "smithy.api#documentation": "The Amazon S3 location of the text file that contains your custom vocabulary\n filter terms. The URI must be located in the same Amazon Web Services Region as the\n resource you're calling.
\nHere's an example URI path:\n s3://DOC-EXAMPLE-BUCKET/my-vocab-filter-file.txt
\n
Note that if you include VocabularyFilterFileUri
in your request, you\n cannot use Words
; you must choose one or the other.
The Amazon S3 location of the text file that contains your custom vocabulary\n filter terms. The URI must be located in the same Amazon Web Services Region as the\n resource you're calling.
\nHere's an example URI path:\n s3://DOC-EXAMPLE-BUCKET/my-vocab-filter-file.txt
\n
Note that if you include VocabularyFilterFileUri
in your request, you\n cannot use Words
; you must choose one or the other.
Adds one or more custom tags, each in the form of a key:value pair, to a new custom\n vocabulary filter at the time you create this new vocabulary filter.
\nTo learn more about using tags with Amazon Transcribe, refer to Tagging\n resources.
" + "smithy.api#documentation": "Adds one or more custom tags, each in the form of a key:value pair, to a new custom\n vocabulary filter at the time you create this new vocabulary filter.
\nTo learn more about using tags with Amazon Transcribe, refer to Tagging\n resources.
" } } } @@ -907,7 +907,7 @@ "LastModifiedTime": { "target": "com.amazonaws.transcribe#DateTime", "traits": { - "smithy.api#documentation": "The date and time you created your custom vocabulary filter.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The date and time you created your custom vocabulary filter.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
A unique name, chosen by you, for your new custom vocabulary.
\nThis name is case sensitive, cannot contain spaces, and must be unique within an\n Amazon Web Services account. If you try to create a new custom vocabulary with the\n same name as an existing custom vocabulary, you get a ConflictException
\n error.
A unique name, chosen by you, for your new custom vocabulary.
\nThis name is case sensitive, cannot contain spaces, and must be unique within an\n Amazon Web Services account. If you try to create a new custom vocabulary with the\n same name as an existing custom vocabulary, you get a ConflictException
\n error.
The language code that represents the language of the entries in your custom\n vocabulary. Each custom vocabulary must contain terms in only one language.
\nA custom vocabulary can only be used to transcribe files in the same language as the\n custom vocabulary. For example, if you create a custom vocabulary using US English\n (en-US
), you can only apply this custom vocabulary to files that\n contain English audio.
For a list of supported languages and their associated language codes, refer to the\n Supported languages table.
", + "smithy.api#documentation": "The language code that represents the language of the entries in your custom\n vocabulary. Each custom vocabulary must contain terms in only one language.
\nA custom vocabulary can only be used to transcribe files in the same language as the\n custom vocabulary. For example, if you create a custom vocabulary using US English\n (en-US
), you can only apply this custom vocabulary to files that\n contain English audio.
For a list of supported languages and their associated language codes, refer to the\n Supported languages table.
", "smithy.api#required": {} } }, "Phrases": { "target": "com.amazonaws.transcribe#Phrases", "traits": { - "smithy.api#documentation": "Use this parameter if you want to create your custom vocabulary by including all\n desired terms, as comma-separated values, within your request. The other option for\n creating your custom vocabulary is to save your entries in a text file and upload them\n to an Amazon S3 bucket, then specify the location of your file using the\n VocabularyFileUri
parameter.
Note that if you include Phrases
in your request, you cannot use\n VocabularyFileUri
; you must choose one or the other.
Each language has a character set that contains all allowed characters for that\n specific language. If you use unsupported characters, your custom vocabulary filter\n request fails. Refer to Character Sets for Custom\n Vocabularies to get the character set for your language.
" + "smithy.api#documentation": "Use this parameter if you want to create your custom vocabulary by including all\n desired terms, as comma-separated values, within your request. The other option for\n creating your custom vocabulary is to save your entries in a text file and upload them\n to an Amazon S3 bucket, then specify the location of your file using the\n VocabularyFileUri
parameter.
Note that if you include Phrases
in your request, you cannot use\n VocabularyFileUri
; you must choose one or the other.
Each language has a character set that contains all allowed characters for that\n specific language. If you use unsupported characters, your custom vocabulary filter\n request fails. Refer to Character Sets for Custom\n Vocabularies to get the character set for your language.
" } }, "VocabularyFileUri": { "target": "com.amazonaws.transcribe#Uri", "traits": { - "smithy.api#documentation": "The Amazon S3 location of the text file that contains your custom vocabulary.\n The URI must be located in the same Amazon Web Services Region as the resource you're\n calling.
\nHere's an example URI path:\n s3://DOC-EXAMPLE-BUCKET/my-vocab-file.txt
\n
Note that if you include VocabularyFileUri
in your request, you cannot\n use the Phrases
flag; you must choose one or the other.
The Amazon S3 location of the text file that contains your custom vocabulary.\n The URI must be located in the same Amazon Web Services Region as the resource you're\n calling.
\nHere's an example URI path:\n s3://DOC-EXAMPLE-BUCKET/my-vocab-file.txt
\n
Note that if you include VocabularyFileUri
in your request, you cannot\n use the Phrases
flag; you must choose one or the other.
Adds one or more custom tags, each in the form of a key:value pair, to a new custom\n vocabulary at the time you create this new custom vocabulary.
\nTo learn more about using tags with Amazon Transcribe, refer to Tagging\n resources.
" + "smithy.api#documentation": "Adds one or more custom tags, each in the form of a key:value pair, to a new custom\n vocabulary at the time you create this new custom vocabulary.
\nTo learn more about using tags with Amazon Transcribe, refer to Tagging\n resources.
" } } } @@ -974,7 +974,7 @@ "LastModifiedTime": { "target": "com.amazonaws.transcribe#DateTime", "traits": { - "smithy.api#documentation": "The date and time you created your custom vocabulary.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The date and time you created your custom vocabulary.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
Provides information about the specified custom language model.
\nThis operation also shows if the base language model that you used to create your\n custom language model has been updated. If Amazon Transcribe has updated the base\n model, you can create a new custom language model using the updated base model.
\nIf you tried to create a new custom language model and the request wasn't successful,\n you can use DescribeLanguageModel
to help identify the reason for this\n failure.
Provides information about the specified custom language model.
\nThis operation also shows if the base language model that you used to create your\n custom language model has been updated. If Amazon Transcribe has updated the base\n model, you can create a new custom language model using the updated base model.
\nIf you tried to create a new custom language model and the request wasn't successful,\n you can use DescribeLanguageModel
to help identify the reason for this\n failure.
Provides information about the specified custom language\n model.
\nThis parameter also shows if the base language model you used to create your custom\n language model has been updated. If Amazon Transcribe has updated the base model, you\n can create a new custom language model using the updated base model.
\nIf you tried to create a new custom language model and the request wasn't successful,\n you can use this DescribeLanguageModel
to help identify the reason for this\n failure.
Provides information about the specified custom language\n model.
\nThis parameter also shows if the base language model you used to create your custom\n language model has been updated. If Amazon Transcribe has updated the base model, you\n can create a new custom language model using the updated base model.
\nIf you tried to create a new custom language model and the request wasn't successful,\n you can use DescribeLanguageModel
to help identify the reason for this\n failure.
Provides information about the specified Call Analytics category.
\nTo get a list of your Call Analytics categories, use the operation.
", + "smithy.api#documentation": "Provides information about the specified Call Analytics category.
\nTo get a list of your Call Analytics categories, use the operation.
", "smithy.api#http": { "method": "GET", "uri": "/callanalyticscategories/{CategoryName}", @@ -1485,7 +1485,7 @@ } ], "traits": { - "smithy.api#documentation": "Provides information about the specified Call Analytics job.
\nTo view the job's status, refer to CallAnalyticsJobStatus
. If the status\n is COMPLETED
, the job is finished. You can find your completed transcript\n at the URI specified in TranscriptFileUri
. If the status is\n FAILED
, FailureReason
provides details on why your\n transcription job failed.
If you enabled personally identifiable information (PII) redaction, the redacted\n transcript appears at the location specified in\n RedactedTranscriptFileUri
.
If you chose to redact the audio in your media file, you can find your redacted media\n file at the location specified in RedactedMediaFileUri
.
To get a list of your Call Analytics jobs, use the operation.
", + "smithy.api#documentation": "Provides information about the specified Call Analytics job.
\nTo view the job's status, refer to CallAnalyticsJobStatus
. If the status\n is COMPLETED
, the job is finished. You can find your completed transcript\n at the URI specified in TranscriptFileUri
. If the status is\n FAILED
, FailureReason
provides details on why your\n transcription job failed.
If you enabled personally identifiable information (PII) redaction, the redacted\n transcript appears at the location specified in\n RedactedTranscriptFileUri
.
If you chose to redact the audio in your media file, you can find your redacted media\n file at the location specified in RedactedMediaFileUri
.
To get a list of your Call Analytics jobs, use the operation.
", "smithy.api#http": { "method": "GET", "uri": "/callanalyticsjobs/{CallAnalyticsJobName}", @@ -1540,7 +1540,7 @@ } ], "traits": { - "smithy.api#documentation": "Provides information about the specified medical transcription job.
\nTo view the status of the specified medical transcription job, check the\n TranscriptionJobStatus
field. If the status is COMPLETED
,\n the job is finished. You can find the results at the location specified in\n TranscriptFileUri
. If the status is FAILED
,\n FailureReason
provides details on why your transcription job\n failed.
To get a list of your medical transcription jobs, use the operation.
", + "smithy.api#documentation": "Provides information about the specified medical transcription job.
\nTo view the status of the specified medical transcription job, check the\n TranscriptionJobStatus
field. If the status is COMPLETED
,\n the job is finished. You can find the results at the location specified in\n TranscriptFileUri
. If the status is FAILED
,\n FailureReason
provides details on why your transcription job\n failed.
To get a list of your medical transcription jobs, use the operation.
", "smithy.api#http": { "method": "GET", "uri": "/medicaltranscriptionjobs/{MedicalTranscriptionJobName}", @@ -1595,7 +1595,7 @@ } ], "traits": { - "smithy.api#documentation": "Provides information about the specified custom medical vocabulary.
\nTo view the status of the specified custom medical vocabulary, check the\n VocabularyState
field. If the status is READY
, your custom\n vocabulary is available to use. If the status is FAILED
,\n FailureReason
provides details on why your vocabulary failed.
To get a list of your custom medical vocabularies, use the operation.
", + "smithy.api#documentation": "Provides information about the specified custom medical vocabulary.
\nTo view the status of the specified custom medical vocabulary, check the\n VocabularyState
field. If the status is READY
, your custom\n vocabulary is available to use. If the status is FAILED
,\n FailureReason
provides details on why your vocabulary failed.
To get a list of your custom medical vocabularies, use the operation.
", "smithy.api#http": { "method": "GET", "uri": "/medicalvocabularies/{VocabularyName}", @@ -1640,7 +1640,7 @@ "LastModifiedTime": { "target": "com.amazonaws.transcribe#DateTime", "traits": { - "smithy.api#documentation": "The date and time the specified custom medical vocabulary was last modified.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The date and time the specified custom medical vocabulary was last modified.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
Provides information about the specified transcription job.
\nTo view the status of the specified transcription job, check the\n TranscriptionJobStatus
field. If the status is COMPLETED
,\n the job is finished. You can find the results at the location specified in\n TranscriptFileUri
. If the status is FAILED
,\n FailureReason
provides details on why your transcription job\n failed.
If you enabled content redaction, the redacted transcript can be found at the location\n specified in RedactedTranscriptFileUri
.
To get a list of your transcription jobs, use the operation.
", + "smithy.api#documentation": "Provides information about the specified transcription job.
\nTo view the status of the specified transcription job, check the\n TranscriptionJobStatus
field. If the status is COMPLETED
,\n the job is finished. You can find the results at the location specified in\n TranscriptFileUri
. If the status is FAILED
,\n FailureReason
provides details on why your transcription job\n failed.
If you enabled content redaction, the redacted transcript can be found at the location\n specified in RedactedTranscriptFileUri
.
To get a list of your transcription jobs, use the operation.
", "smithy.api#http": { "method": "GET", "uri": "/transcriptionjobs/{TranscriptionJobName}", @@ -1735,7 +1735,7 @@ } ], "traits": { - "smithy.api#documentation": "Provides information about the specified custom vocabulary.
\nTo view the status of the specified custom vocabulary, check the\n VocabularyState
field. If the status is READY
, your custom\n vocabulary is available to use. If the status is FAILED
,\n FailureReason
provides details on why your custom vocabulary\n failed.
To get a list of your custom vocabularies, use the operation.
", + "smithy.api#documentation": "Provides information about the specified custom vocabulary.
\nTo view the status of the specified custom vocabulary, check the\n VocabularyState
field. If the status is READY
, your custom\n vocabulary is available to use. If the status is FAILED
,\n FailureReason
provides details on why your custom vocabulary\n failed.
To get a list of your custom vocabularies, use the operation.
", "smithy.api#http": { "method": "GET", "uri": "/vocabularies/{VocabularyName}", @@ -1766,7 +1766,7 @@ } ], "traits": { - "smithy.api#documentation": "Provides information about the specified custom vocabulary filter.
\nTo get a list of your custom vocabulary filters, use the operation.
", + "smithy.api#documentation": "Provides information about the specified custom vocabulary filter.
\nTo get a list of your custom vocabulary filters, use the operation.
", "smithy.api#http": { "method": "GET", "uri": "/vocabularyFilters/{VocabularyFilterName}", @@ -1805,7 +1805,7 @@ "LastModifiedTime": { "target": "com.amazonaws.transcribe#DateTime", "traits": { - "smithy.api#documentation": "The date and time the specified custom vocabulary filter was last modified.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The date and time the specified custom vocabulary filter was last modified.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The date and time the specified custom vocabulary was last modified.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The date and time the specified custom vocabulary was last modified.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The Amazon S3 location (URI) of the text files you want to use to train your\n custom language model.
\nHere's an example URI path:\n s3://DOC-EXAMPLE-BUCKET/my-model-training-data/
\n
The Amazon S3 location (URI) of the text files you want to use to train your\n custom language model.
\nHere's an example URI path:\n s3://DOC-EXAMPLE-BUCKET/my-model-training-data/
\n
The Amazon S3 location (URI) of the text files you want to use to tune your\n custom language model.
\nHere's an example URI path:\n s3://DOC-EXAMPLE-BUCKET/my-model-tuning-data/
\n
The Amazon S3 location (URI) of the text files you want to use to tune your\n custom language model.
\nHere's an example URI path:\n s3://DOC-EXAMPLE-BUCKET/my-model-tuning-data/
\n
The Amazon Resource Name (ARN) of an IAM role that has permissions to\n access the Amazon S3 bucket that contains your input files. If the role that you\n specify doesn’t have the appropriate permissions to access the specified Amazon S3 location, your request fails.
\nIAM role ARNs have the format\n arn:partition:iam::account:role/role-name-with-path
. For example:\n arn:aws:iam::111122223333:role/Admin
.
For more information, see IAM\n ARNs.
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of an IAM role that has permissions to\n access the Amazon S3 bucket that contains your input files. If the role that you\n specify doesn’t have the appropriate permissions to access the specified Amazon S3 location, your request fails.
\nIAM role ARNs have the format\n arn:partition:iam::account:role/role-name-with-path
. For example:\n arn:aws:iam::111122223333:role/Admin
.
For more information, see IAM\n ARNs.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "Contains the Amazon S3 location of the training data you want to use to create\n a new custom language model, and permissions to access this location.
\nWhen using InputDataConfig
, you must include these sub-parameters:\n S3Uri
and DataAccessRoleArn
. You can optionally include\n TuningDataS3Uri
.
Contains the Amazon S3 location of the training data you want to use to create\n a new custom language model, and permissions to access this location.
\nWhen using InputDataConfig
, you must include these sub-parameters:\n S3Uri
and DataAccessRoleArn
. You can optionally include\n TuningDataS3Uri
.
Flag the presence or absence of interruptions in your Call Analytics transcription\n output.
\nRules using InterruptionFilter
are designed to match:
Instances where an agent interrupts a customer
\nInstances where a customer interrupts an agent
\nEither participant interrupting the other
\nA lack of interruptions
\nSee Rule criteria for batch \n categories for usage examples.
" + "smithy.api#documentation": "Flag the presence or absence of interruptions in your Call Analytics transcription\n output.
\nRules using InterruptionFilter
are designed to match:
Instances where an agent interrupts a customer
\nInstances where a customer interrupts an agent
\nEither participant interrupting the other
\nA lack of interruptions
\nSee Rule criteria for batch \n categories for usage examples.
" } }, "com.amazonaws.transcribe#JobExecutionSettings": { @@ -1975,18 +1975,18 @@ "AllowDeferredExecution": { "target": "com.amazonaws.transcribe#Boolean", "traits": { - "smithy.api#documentation": "Makes it possible to enable job queuing when your concurrent request limit is\n exceeded. When AllowDeferredExecution
is set to true
,\n transcription job requests are placed in a queue until the number of jobs falls below\n the concurrent request limit. If AllowDeferredExecution
is set to\n false
and the number of transcription job requests exceed the\n concurrent request limit, you get a LimitExceededException
error.
Note that job queuing is enabled by default for Call Analytics jobs.
\nIf you include AllowDeferredExecution
in your request, you must also\n include DataAccessRoleArn
.
Makes it possible to enable job queuing when your concurrent request limit is\n exceeded. When AllowDeferredExecution
is set to true
,\n transcription job requests are placed in a queue until the number of jobs falls below\n the concurrent request limit. If AllowDeferredExecution
is set to\n false
and the number of transcription job requests exceeds the\n concurrent request limit, you get a LimitExceededException
error.
Note that job queuing is enabled by default for Call Analytics jobs.
\nIf you include AllowDeferredExecution
in your request, you must also\n include DataAccessRoleArn
.
The Amazon Resource Name (ARN) of an IAM role that has permissions to\n access the Amazon S3 bucket that contains your input files. If the role that you\n specify doesn’t have the appropriate permissions to access the specified Amazon S3 \n location, your request fails.
\nIAM role ARNs have the format\n arn:partition:iam::account:role/role-name-with-path
. For example:\n arn:aws:iam::111122223333:role/Admin
. For more information, see IAM\n ARNs.
Note that if you include DataAccessRoleArn
in your request, you must also\n include AllowDeferredExecution
.
The Amazon Resource Name (ARN) of an IAM role that has permissions to\n access the Amazon S3 bucket that contains your input files. If the role that you\n specify doesn’t have the appropriate permissions to access the specified Amazon S3 \n location, your request fails.
\nIAM role ARNs have the format\n arn:partition:iam::account:role/role-name-with-path
. For example:\n arn:aws:iam::111122223333:role/Admin
. For more information, see IAM\n ARNs.
Note that if you include DataAccessRoleArn
in your request, you must also\n include AllowDeferredExecution
.
Makes it possible to control how your transcription job is processed. Currently, the\n only JobExecutionSettings
modification you can choose is enabling job\n queueing using the AllowDeferredExecution
sub-parameter.
If you include JobExecutionSettings
in your request, you must also\n include the sub-parameters: AllowDeferredExecution
and\n DataAccessRoleArn
.
Makes it possible to control how your transcription job is processed. Currently, the\n only JobExecutionSettings
modification you can choose is enabling job\n queueing using the AllowDeferredExecution
sub-parameter.
If you include JobExecutionSettings
in your request, you must also\n include the sub-parameters: AllowDeferredExecution
and\n DataAccessRoleArn
.
The name of the custom vocabulary you want to use when processing your transcription\n job. Custom vocabulary names are case sensitive.
\nThe language of the specified custom vocabulary must match the language code that you\n specify in your transcription request. If the languages don't match, the custom\n vocabulary isn't applied. There are no errors or warnings associated with a language\n mismatch.
" + "smithy.api#documentation": "The name of the custom vocabulary you want to use when processing your transcription\n job. Custom vocabulary names are case sensitive.
\nThe language of the specified custom vocabulary must match the language code that you\n specify in your transcription request. If the languages don't match, the custom\n vocabulary isn't applied. There are no errors or warnings associated with a language\n mismatch.
" } }, "VocabularyFilterName": { "target": "com.amazonaws.transcribe#VocabularyFilterName", "traits": { - "smithy.api#documentation": "The name of the custom vocabulary filter you want to use when processing your\n transcription job. Custom vocabulary filter names are case sensitive.
\nThe language of the specified custom vocabulary filter must match the language code\n that you specify in your transcription request. If the languages don't match, the custom\n vocabulary filter isn't applied. There are no errors or warnings associated with a\n language mismatch.
\nNote that if you include VocabularyFilterName
in your request, you must\n also include VocabularyFilterMethod
.
The name of the custom vocabulary filter you want to use when processing your\n transcription job. Custom vocabulary filter names are case sensitive.
\nThe language of the specified custom vocabulary filter must match the language code\n that you specify in your transcription request. If the languages don't match, the custom\n vocabulary filter isn't applied. There are no errors or warnings associated with a\n language mismatch.
\nNote that if you include VocabularyFilterName
in your request, you must\n also include VocabularyFilterMethod
.
The name of the custom language model you want to use when processing your\n transcription job. Note that custom language model names are case sensitive.
\nThe language of the specified custom language model must match the language code that\n you specify in your transcription request. If the languages don't match, the custom\n language model isn't applied. There are no errors or warnings associated with a language\n mismatch.
" + "smithy.api#documentation": "The name of the custom language model you want to use when processing your\n transcription job. Note that custom language model names are case sensitive.
\nThe language of the specified custom language model must match the language code that\n you specify in your transcription request. If the languages don't match, the custom\n language model isn't applied. There are no errors or warnings associated with a language\n mismatch.
" } } }, "traits": { - "smithy.api#documentation": "If using automatic language identification in your request and you want to apply a\n custom language model, a custom vocabulary, or a custom vocabulary filter, include\n LanguageIdSettings
with the relevant sub-parameters\n (VocabularyName
, LanguageModelName
, and\n VocabularyFilterName
). Note that multi-language identification\n (IdentifyMultipleLanguages
) doesn't support custom language\n models.
\n LanguageIdSettings
supports two to five language codes. Each language\n code you include can have an associated custom language model, custom vocabulary, and\n custom vocabulary filter. The language codes that you specify must match the languages\n of the associated custom language models, custom vocabularies, and custom vocabulary\n filters.
It's recommended that you include LanguageOptions
when using\n LanguageIdSettings
to ensure that the correct language dialect is\n identified. For example, if you specify a custom vocabulary that is in\n en-US
but Amazon Transcribe determines that the language spoken in\n your media is en-AU
, your custom vocabulary is not\n applied to your transcription. If you include LanguageOptions
and include\n en-US
as the only English language dialect, your custom vocabulary\n is applied to your transcription.
If you want to include a custom language model with your request but do not want to use automatic language identification, use\n instead the parameter with the\n
LanguageModelName
sub-parameter. If you want to include a custom\n vocabulary or a custom vocabulary filter (or both) with your request but do not want to use automatic language identification, use\n instead the parameter with the\n
VocabularyName
or VocabularyFilterName
(or both)\n sub-parameter.
If using automatic language identification in your request and you want to apply a\n custom language model, a custom vocabulary, or a custom vocabulary filter, include\n LanguageIdSettings
with the relevant sub-parameters\n (VocabularyName
, LanguageModelName
, and\n VocabularyFilterName
). Note that multi-language identification\n (IdentifyMultipleLanguages
) doesn't support custom language\n models.
\n LanguageIdSettings
supports two to five language codes. Each language\n code you include can have an associated custom language model, custom vocabulary, and\n custom vocabulary filter. The language codes that you specify must match the languages\n of the associated custom language models, custom vocabularies, and custom vocabulary\n filters.
It's recommended that you include LanguageOptions
when using\n LanguageIdSettings
to ensure that the correct language dialect is\n identified. For example, if you specify a custom vocabulary that is in\n en-US
but Amazon Transcribe determines that the language spoken in\n your media is en-AU
, your custom vocabulary is not\n applied to your transcription. If you include LanguageOptions
and include\n en-US
as the only English language dialect, your custom vocabulary\n is applied to your transcription.
If you want to include a custom language model with your request but do not want to use automatic language identification, use\n instead the parameter with the\n
LanguageModelName
sub-parameter. If you want to include a custom\n vocabulary or a custom vocabulary filter (or both) with your request but do not want to use automatic language identification, use\n instead the parameter with the\n
VocabularyName
or VocabularyFilterName
(or both)\n sub-parameter.
A unique name, chosen by you, for your custom language model.
\nThis name is case sensitive, cannot contain spaces, and must be unique within an\n Amazon Web Services account.
" + "smithy.api#documentation": "A unique name, chosen by you, for your custom language model.
\nThis name is case sensitive, cannot contain spaces, and must be unique within an\n Amazon Web Services account.
" } }, "CreateTime": { "target": "com.amazonaws.transcribe#DateTime", "traits": { - "smithy.api#documentation": "The date and time the specified custom language model was created.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The date and time the specified custom language model was created.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The date and time the specified custom language model was last modified.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The date and time the specified custom language model was last modified.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The language code used to create your custom language model. Each custom language\n model must contain terms in only one language, and the language you select for your\n custom language model must match the language of your training and tuning data.
\nFor a list of supported languages and their associated language codes, refer to the\n Supported languages table. Note that U.S. English (en-US
) is\n the only language supported with Amazon Transcribe Medical.
The language code used to create your custom language model. Each custom language\n model must contain terms in only one language, and the language you select for your\n custom language model must match the language of your training and tuning data.
\nFor a list of supported languages and their associated language codes, refer to the\n Supported languages table. Note that U.S. English (en-US
) is\n the only language supported with Amazon Transcribe Medical.
Shows if a more current base model is available for use with the specified custom\n language model.
\nIf false
, your custom language model is using the most up-to-date base\n model.
If true
, there is a newer base model available than the one your language\n model is using.
Note that to update a base model, you must recreate the custom language model using\n the new base model. Base model upgrades for existing custom language models are not\n supported.
" + "smithy.api#documentation": "Shows if a more current base model is available for use with the specified custom\n language model.
\nIf false
, your custom language model is using the most up-to-date base\n model.
If true
, there is a newer base model available than the one your language\n model is using.
Note that to update a base model, you must recreate the custom language model using\n the new base model. Base model upgrades for existing custom language models are not\n supported.
" } }, "FailureReason": { @@ -2426,7 +2414,7 @@ } ], "traits": { - "smithy.api#documentation": "Provides a list of Call Analytics categories, including all rules that make up each\n category.
\nTo get detailed information about a specific Call Analytics category, use the operation.
", + "smithy.api#documentation": "Provides a list of Call Analytics categories, including all rules that make up each\n category.
\nTo get detailed information about a specific Call Analytics category, use the operation.
", "smithy.api#http": { "method": "GET", "uri": "/callanalyticscategories", @@ -2495,7 +2483,7 @@ } ], "traits": { - "smithy.api#documentation": "Provides a list of Call Analytics jobs that match the specified criteria. If no\n criteria are specified, all Call Analytics jobs are returned.
\nTo get detailed information about a specific Call Analytics job, use the operation.
", + "smithy.api#documentation": "Provides a list of Call Analytics jobs that match the specified criteria. If no\n criteria are specified, all Call Analytics jobs are returned.
\nTo get detailed information about a specific Call Analytics job, use the operation.
", "smithy.api#http": { "method": "GET", "uri": "/callanalyticsjobs", @@ -2584,7 +2572,7 @@ } ], "traits": { - "smithy.api#documentation": "Provides a list of custom language models that match the specified criteria. If no\n criteria are specified, all custom language models are returned.
\nTo get detailed information about a specific custom language model, use the operation.
", + "smithy.api#documentation": "Provides a list of custom language models that match the specified criteria. If no\n criteria are specified, all custom language models are returned.
\nTo get detailed information about a specific custom language model, use the operation.
", "smithy.api#http": { "method": "GET", "uri": "/languagemodels", @@ -2667,7 +2655,7 @@ } ], "traits": { - "smithy.api#documentation": "Provides a list of medical transcription jobs that match the specified criteria. If no\n criteria are specified, all medical transcription jobs are returned.
\nTo get detailed information about a specific medical transcription job, use the operation.
", + "smithy.api#documentation": "Provides a list of medical transcription jobs that match the specified criteria. If no\n criteria are specified, all medical transcription jobs are returned.
\nTo get detailed information about a specific medical transcription job, use the operation.
", "smithy.api#http": { "method": "GET", "uri": "/medicaltranscriptionjobs", @@ -2756,7 +2744,7 @@ } ], "traits": { - "smithy.api#documentation": "Provides a list of custom medical vocabularies that match the specified criteria. If\n no criteria are specified, all custom medical vocabularies are returned.
\nTo get detailed information about a specific custom medical vocabulary, use the operation.
", + "smithy.api#documentation": "Provides a list of custom medical vocabularies that match the specified criteria. If\n no criteria are specified, all custom medical vocabularies are returned.
\nTo get detailed information about a specific custom medical vocabulary, use the operation.
", "smithy.api#http": { "method": "GET", "uri": "/medicalvocabularies", @@ -2848,7 +2836,7 @@ } ], "traits": { - "smithy.api#documentation": "Lists all tags associated with the specified transcription job, vocabulary, model, or\n resource.
\nTo learn more about using tags with Amazon Transcribe, refer to Tagging\n resources.
", + "smithy.api#documentation": "Lists all tags associated with the specified transcription job, vocabulary, model, or\n resource.
\nTo learn more about using tags with Amazon Transcribe, refer to Tagging\n resources.
", "smithy.api#http": { "method": "GET", "uri": "/tags/{ResourceArn}", @@ -2862,7 +2850,7 @@ "ResourceArn": { "target": "com.amazonaws.transcribe#TranscribeArn", "traits": { - "smithy.api#documentation": "Returns a list of all tags associated with the specified Amazon Resource Name (ARN).\n ARNs have the format\n arn:partition:service:region:account-id:resource-type/resource-id
.
For example,\n arn:aws:transcribe:us-west-2:111122223333:transcription-job/transcription-job-name
.
Valid values for resource-type
are: transcription-job
,\n medical-transcription-job
, vocabulary
,\n medical-vocabulary
, vocabulary-filter
, and\n language-model
.
Returns a list of all tags associated with the specified Amazon Resource Name (ARN).\n ARNs have the format\n arn:partition:service:region:account-id:resource-type/resource-id
.
For example,\n arn:aws:transcribe:us-west-2:111122223333:transcription-job/transcription-job-name
.
Valid values for resource-type
are: transcription-job
,\n medical-transcription-job
, vocabulary
,\n medical-vocabulary
, vocabulary-filter
, and\n language-model
.
Provides a list of transcription jobs that match the specified criteria. If no\n criteria are specified, all transcription jobs are returned.
\nTo get detailed information about a specific transcription job, use the operation.
", + "smithy.api#documentation": "Provides a list of transcription jobs that match the specified criteria. If no\n criteria are specified, all transcription jobs are returned.
\nTo get detailed information about a specific transcription job, use the operation.
", "smithy.api#http": { "method": "GET", "uri": "/transcriptionjobs", @@ -2995,7 +2983,7 @@ } ], "traits": { - "smithy.api#documentation": "Provides a list of custom vocabularies that match the specified criteria. If no\n criteria are specified, all custom vocabularies are returned.
\nTo get detailed information about a specific custom vocabulary, use the operation.
", + "smithy.api#documentation": "Provides a list of custom vocabularies that match the specified criteria. If no\n criteria are specified, all custom vocabularies are returned.
\nTo get detailed information about a specific custom vocabulary, use the operation.
", "smithy.api#http": { "method": "GET", "uri": "/vocabularies", @@ -3084,7 +3072,7 @@ } ], "traits": { - "smithy.api#documentation": "Provides a list of custom vocabulary filters that match the specified criteria. If no\n criteria are specified, all custom vocabularies are returned.
\nTo get detailed information about a specific custom vocabulary filter, use the operation.
", + "smithy.api#documentation": "Provides a list of custom vocabulary filters that match the specified criteria. If no\n criteria are specified, all custom vocabularies are returned.
\nTo get detailed information about a specific custom vocabulary filter, use the operation.
", "smithy.api#http": { "method": "GET", "uri": "/vocabularyFilters", @@ -3173,18 +3161,18 @@ "MediaFileUri": { "target": "com.amazonaws.transcribe#Uri", "traits": { - "smithy.api#documentation": "The Amazon S3 location of the media file you want to transcribe. For\n example:
\n\n s3://DOC-EXAMPLE-BUCKET/my-media-file.flac
\n
\n s3://DOC-EXAMPLE-BUCKET/media-files/my-media-file.flac
\n
Note that the Amazon S3 bucket that contains your input media must be located\n in the same Amazon Web Services Region where you're making your transcription\n request.
" + "smithy.api#documentation": "The Amazon S3 location of the media file you want to transcribe. For\n example:
\n\n s3://DOC-EXAMPLE-BUCKET/my-media-file.flac
\n
\n s3://DOC-EXAMPLE-BUCKET/media-files/my-media-file.flac
\n
Note that the Amazon S3 bucket that contains your input media must be located\n in the same Amazon Web Services Region where you're making your transcription\n request.
" } }, "RedactedMediaFileUri": { "target": "com.amazonaws.transcribe#Uri", "traits": { - "smithy.api#documentation": "The Amazon S3 location of the media file you want to redact. For\n example:
\n\n s3://DOC-EXAMPLE-BUCKET/my-media-file.flac
\n
\n s3://DOC-EXAMPLE-BUCKET/media-files/my-media-file.flac
\n
Note that the Amazon S3 bucket that contains your input media must be located\n in the same Amazon Web Services Region where you're making your transcription\n request.
\n\n RedactedMediaFileUri
produces a redacted audio file in addition to a \n redacted transcript. It is only supported for Call Analytics\n (StartCallAnalyticsJob
) transcription requests.
The Amazon S3 location of the media file you want to redact. For\n example:
\n\n s3://DOC-EXAMPLE-BUCKET/my-media-file.flac
\n
\n s3://DOC-EXAMPLE-BUCKET/media-files/my-media-file.flac
\n
Note that the Amazon S3 bucket that contains your input media must be located\n in the same Amazon Web Services Region where you're making your transcription\n request.
\n\n RedactedMediaFileUri
produces a redacted audio file in addition to a \n redacted transcript. It is only supported for Call Analytics\n (StartCallAnalyticsJob
) transcription requests.
Describes the Amazon S3 location of the media file you want to use in your\n request.
\nFor information on supported media formats, refer to the MediaFormat parameter or the Media formats section \n in the Amazon S3 Developer Guide.
" + "smithy.api#documentation": "Describes the Amazon S3 location of the media file you want to use in your\n request.
\nFor information on supported media formats, refer to the MediaFormat parameter or the Media formats section \n in the Amazon S3 Developer Guide.
" } }, "com.amazonaws.transcribe#MediaFormat": { @@ -3269,7 +3257,7 @@ "TranscriptFileUri": { "target": "com.amazonaws.transcribe#Uri", "traits": { - "smithy.api#documentation": "The Amazon S3 location of your transcript. You can use this URI to access or\n download your transcript.
\nNote that this is the Amazon S3 location you specified in your request using the \n OutputBucketName
parameter.
The Amazon S3 location of your transcript. You can use this URI to access or\n download your transcript.
\nNote that this is the Amazon S3 location you specified in your request using the \n OutputBucketName
parameter.
Provides the status of the specified medical transcription job.
\nIf the status is COMPLETED
, the job is finished and you can find the\n results at the location specified in TranscriptFileUri
. If the status is\n FAILED
, FailureReason
provides details on why your\n transcription job failed.
Provides the status of the specified medical transcription job.
\nIf the status is COMPLETED
, the job is finished and you can find the\n results at the location specified in TranscriptFileUri
. If the status is\n FAILED
, FailureReason
provides details on why your\n transcription job failed.
The date and time the specified medical transcription job began processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.789000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified medical transcription job began processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.789000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified medical transcription job request was made.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified medical transcription job request was made.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified medical transcription job finished processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:33:13.922000-07:00
represents a transcription job\n that started processing at 12:33 PM UTC-7 on May 4, 2022.
The date and time the specified medical transcription job finished processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:33:13.922000-07:00
represents a transcription job\n that started processing at 12:33 PM UTC-7 on May 4, 2022.
If TranscriptionJobStatus
is FAILED
,\n FailureReason
contains information about why the transcription job\n request failed.
The FailureReason
field contains one of the following values:
\n Unsupported media format
.
The media format specified in MediaFormat
isn't valid. Refer to\n MediaFormat for a list of supported\n formats.
\n The media format provided does not match the detected media\n format
.
The media format specified in MediaFormat
doesn't match the\n format of the input file. Check the media format of your media file and correct\n the specified value.
\n Invalid sample rate for audio file
.
The sample rate specified in MediaSampleRateHertz
isn't valid.\n The sample rate must be between 16,000 and 48,000 hertz.
\n The sample rate provided does not match the detected sample\n rate
.
The sample rate specified in MediaSampleRateHertz
doesn't match\n the sample rate detected in your input media file. Check the sample rate of your\n media file and correct the specified value.
\n Invalid file size: file size too large
.
The size of your media file is larger than what Amazon Transcribe can\n process. For more information, refer to Guidelines and\n quotas.
\n\n Invalid number of channels: number of channels too large
.
Your audio contains more channels than Amazon Transcribe is able to process.\n For more information, refer to Guidelines and\n quotas.
\nIf TranscriptionJobStatus
is FAILED
,\n FailureReason
contains information about why the transcription job\n request failed.
The FailureReason
field contains one of the following values:
\n Unsupported media format
.
The media format specified in MediaFormat
isn't valid. Refer to\n MediaFormat for a list of supported\n formats.
\n The media format provided does not match the detected media\n format
.
The media format specified in MediaFormat
doesn't match the\n format of the input file. Check the media format of your media file and correct\n the specified value.
\n Invalid sample rate for audio file
.
The sample rate specified in MediaSampleRateHertz
isn't valid.\n The sample rate must be between 16,000 and 48,000 hertz.
\n The sample rate provided does not match the detected sample\n rate
.
The sample rate specified in MediaSampleRateHertz
doesn't match\n the sample rate detected in your input media file. Check the sample rate of your\n media file and correct the specified value.
\n Invalid file size: file size too large
.
The size of your media file is larger than what Amazon Transcribe can\n process. For more information, refer to Guidelines and\n quotas.
\n\n Invalid number of channels: number of channels too large
.
Your audio contains more channels than Amazon Transcribe is able to process.\n For more information, refer to Guidelines and\n quotas.
\nProvides detailed information about a medical transcription job.
\nTo view the status of the specified medical transcription job, check the\n TranscriptionJobStatus
field. If the status is COMPLETED
,\n the job is finished and you can find the results at the location specified in\n TranscriptFileUri
. If the status is FAILED
,\n FailureReason
provides details on why your transcription job\n failed.
Provides detailed information about a medical transcription job.
\nTo view the status of the specified medical transcription job, check the\n TranscriptionJobStatus
field. If the status is COMPLETED
,\n the job is finished and you can find the results at the location specified in\n TranscriptFileUri
. If the status is FAILED
,\n FailureReason
provides details on why your transcription job\n failed.
The date and time the specified medical transcription job request was made.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified medical transcription job request was made.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time your medical transcription job began processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.789000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time your medical transcription job began processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.789000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified medical transcription job finished processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:33:13.922000-07:00
represents a transcription job\n that started processing at 12:33 PM UTC-7 on May 4, 2022.
The date and time the specified medical transcription job finished processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:33:13.922000-07:00
represents a transcription job\n that started processing at 12:33 PM UTC-7 on May 4, 2022.
Provides the status of your medical transcription job.
\nIf the status is COMPLETED
, the job is finished and you can find the\n results at the location specified in TranscriptFileUri
. If the status is\n FAILED
, FailureReason
provides details on why your\n transcription job failed.
Provides the status of your medical transcription job.
\nIf the status is COMPLETED
, the job is finished and you can find the\n results at the location specified in TranscriptFileUri
. If the status is\n FAILED
, FailureReason
provides details on why your\n transcription job failed.
Indicates where the specified medical transcription output is stored.
\nIf the value is CUSTOMER_BUCKET
, the location is the Amazon S3\n bucket you specified using the OutputBucketName
parameter in your request. If you also included\n OutputKey
in your request, your output is located in the path you\n specified in your request.
If the value is SERVICE_BUCKET
, the location is a service-managed Amazon S3 bucket. To access a transcript stored in a service-managed bucket, use the\n URI shown in the TranscriptFileUri
field.
Indicates where the specified medical transcription output is stored.
\nIf the value is CUSTOMER_BUCKET
, the location is the Amazon S3\n bucket you specified using the OutputBucketName
parameter in your request. If you also included\n OutputKey
in your request, your output is located in the path you\n specified in your request.
If the value is SERVICE_BUCKET
, the location is a service-managed Amazon S3 bucket. To access a transcript stored in a service-managed bucket, use the\n URI shown in the TranscriptFileUri
field.
Enables speaker partitioning (diarization) in your transcription output. Speaker\n partitioning labels the speech from individual speakers in your media file.
\nIf you enable ShowSpeakerLabels
in your request, you must also include\n MaxSpeakerLabels
.
You can't include ShowSpeakerLabels
and\n ChannelIdentification
in the same request. Including both parameters\n returns a BadRequestException
.
For more information, see Partitioning speakers\n (diarization).
" + "smithy.api#documentation": "Enables speaker partitioning (diarization) in your transcription output. Speaker\n partitioning labels the speech from individual speakers in your media file.
\nIf you enable ShowSpeakerLabels
in your request, you must also include\n MaxSpeakerLabels
.
You can't include ShowSpeakerLabels
and\n ChannelIdentification
in the same request. Including both parameters\n returns a BadRequestException
.
For more information, see Partitioning speakers\n (diarization).
" } }, "MaxSpeakerLabels": { "target": "com.amazonaws.transcribe#MaxSpeakers", "traits": { - "smithy.api#documentation": "Specify the maximum number of speakers you want to partition in your media.
\nNote that if your media contains more speakers than the specified number, multiple\n speakers are treated as a single speaker.
\nIf you specify the MaxSpeakerLabels
field, you must set the\n ShowSpeakerLabels
field to true.
Specify the maximum number of speakers you want to partition in your media.
\nNote that if your media contains more speakers than the specified number, multiple\n speakers are treated as a single speaker.
\nIf you specify the MaxSpeakerLabels
field, you must set the\n ShowSpeakerLabels
field to true.
Enables channel identification in multi-channel audio.
\nChannel identification transcribes the audio on each channel independently, then\n appends the output for each channel into one transcript.
\nIf you have multi-channel audio and do not enable channel identification, your audio\n is transcribed in a continuous manner and your transcript does not separate the speech\n by channel.
\nYou can't include both ShowSpeakerLabels
and\n ChannelIdentification
in the same request. Including both parameters\n returns a BadRequestException
.
For more information, see Transcribing multi-channel\n audio.
" + "smithy.api#documentation": "Enables channel identification in multi-channel audio.
\nChannel identification transcribes the audio on each channel independently, then\n appends the output for each channel into one transcript.
\nIf you have multi-channel audio and do not enable channel identification, your audio\n is transcribed in a continuous manner and your transcript does not separate the speech\n by channel.
\nYou can't include both ShowSpeakerLabels
and\n ChannelIdentification
in the same request. Including both parameters\n returns a BadRequestException
.
For more information, see Transcribing multi-channel\n audio.
" } }, "ShowAlternatives": { "target": "com.amazonaws.transcribe#Boolean", "traits": { - "smithy.api#documentation": "To include alternative transcriptions within your transcription output, include\n ShowAlternatives
in your transcription request.
If you include ShowAlternatives
, you must also include\n MaxAlternatives
, which is the maximum number of alternative\n transcriptions you want Amazon Transcribe Medical to generate.
For more information, see Alternative\n transcriptions.
" + "smithy.api#documentation": "To include alternative transcriptions within your transcription output, include\n ShowAlternatives
in your transcription request.
If you include ShowAlternatives
, you must also include\n MaxAlternatives
, which is the maximum number of alternative\n transcriptions you want Amazon Transcribe Medical to generate.
For more information, see Alternative\n transcriptions.
" } }, "MaxAlternatives": { "target": "com.amazonaws.transcribe#MaxAlternatives", "traits": { - "smithy.api#documentation": "Indicate the maximum number of alternative transcriptions you want Amazon Transcribe\n Medical to include in your transcript.
\nIf you select a number greater than the number of alternative transcriptions generated\n by Amazon Transcribe Medical, only the actual number of alternative transcriptions are\n included.
\nIf you include MaxAlternatives
in your request, you must also include\n ShowAlternatives
with a value of true
.
For more information, see Alternative\n transcriptions.
" + "smithy.api#documentation": "Indicate the maximum number of alternative transcriptions you want Amazon Transcribe\n Medical to include in your transcript.
\nIf you select a number greater than the number of alternative transcriptions generated\n by Amazon Transcribe Medical, only the actual number of alternative transcriptions are\n included.
\nIf you include MaxAlternatives
in your request, you must also include\n ShowAlternatives
with a value of true
.
For more information, see Alternative\n transcriptions.
" } }, "VocabularyName": { "target": "com.amazonaws.transcribe#VocabularyName", "traits": { - "smithy.api#documentation": "The name of the custom vocabulary you want to use when processing your medical\n transcription job. Custom vocabulary names are case sensitive.
\nThe language of the specified custom vocabulary must match the language code that you\n specify in your transcription request. If the languages don't match, the custom\n vocabulary isn't applied. There are no errors or warnings associated with a language\n mismatch. US English (en-US
) is the only valid language for Amazon Transcribe Medical.
The name of the custom vocabulary you want to use when processing your medical\n transcription job. Custom vocabulary names are case sensitive.
\nThe language of the specified custom vocabulary must match the language code that you\n specify in your transcription request. If the languages don't match, the custom\n vocabulary isn't applied. There are no errors or warnings associated with a language\n mismatch. US English (en-US
) is the only valid language for Amazon Transcribe Medical.
The name of the custom language model you want to use when processing your\n transcription job. Note that custom language model names are case sensitive.
\nThe language of the specified custom language model must match the language code that\n you specify in your transcription request. If the languages don't match, the custom\n language model isn't applied. There are no errors or warnings associated with a language\n mismatch.
" + "smithy.api#documentation": "The name of the custom language model you want to use when processing your\n transcription job. Note that custom language model names are case sensitive.
\nThe language of the specified custom language model must match the language code that\n you specify in your transcription request. If the languages don't match, the custom\n language model isn't applied. There are no errors or warnings associated with a language\n mismatch.
" } } }, "traits": { - "smithy.api#documentation": "Provides the name of the custom language model that was included in the specified\n transcription job.
\nOnly use ModelSettings
with the LanguageModelName
\n sub-parameter if you're not using automatic language\n identification (). If using\n
LanguageIdSettings
in your request, this parameter contains a\n LanguageModelName
sub-parameter.
Provides the name of the custom language model that was included in the specified\n transcription job.
\nOnly use ModelSettings
with the LanguageModelName
\n sub-parameter if you're not using automatic language\n identification (). If using\n
LanguageIdSettings
in your request, this parameter contains a\n LanguageModelName
sub-parameter.
Flag the presence or absence of periods of silence in your Call Analytics\n transcription output.
\nRules using NonTalkTimeFilter
are designed to match:
The presence of silence at specified periods throughout the call
\nThe presence of speech at specified periods throughout the call
\nSee Rule criteria for batch \n categories for usage examples.
" + "smithy.api#documentation": "Flag the presence or absence of periods of silence in your Call Analytics\n transcription output.
\nRules using NonTalkTimeFilter
are designed to match:
The presence of silence at specified periods throughout the call
\nThe presence of speech at specified periods throughout the call
\nSee Rule criteria for batch \n categories for usage examples.
" } }, "com.amazonaws.transcribe#NotFoundException": { @@ -3845,7 +3833,7 @@ } }, "traits": { - "smithy.api#documentation": "A time range, in percentage, between two points in your media file.
\nYou can use StartPercentage
and EndPercentage
to search a\n custom segment. For example, setting StartPercentage
to 10 and\n EndPercentage
to 50 only searches for your specified criteria in the\n audio contained between the 10 percent mark and the 50 percent mark of your media\n file.
You can use also First
to search from the start of the media file until\n the time that you specify. Or use Last
to search from the time that you\n specify until the end of the media file. For example, setting First
to 10\n only searches for your specified criteria in the audio contained in the first 10 percent\n of the media file.
If you prefer to use milliseconds instead of percentage, see .
" + "smithy.api#documentation": "A time range, in percentage, between two points in your media file.
\nYou can use StartPercentage
and EndPercentage
to search a\n custom segment. For example, setting StartPercentage
to 10 and\n EndPercentage
to 50 only searches for your specified criteria in the\n audio contained between the 10 percent mark and the 50 percent mark of your media\n file.
You can use also First
to search from the start of the media file until\n the time that you specify. Or use Last
to search from the time that you\n specify until the end of the media file. For example, setting First
to 10\n only searches for your specified criteria in the audio contained in the first 10 percent\n of the media file.
If you prefer to use milliseconds instead of percentage, see .
" } }, "com.amazonaws.transcribe#Rule": { @@ -3877,7 +3865,7 @@ } }, "traits": { - "smithy.api#documentation": "A rule is a set of criteria that you can specify to flag an attribute in your Call\n Analytics output. Rules define a Call Analytics category.
\nRules can include these parameters: , , , and .
\nTo learn more about Call Analytics rules and categories, see Creating categories for batch\n transcriptions and Creating categories for\n streaming transcriptions.
\nTo learn more about Call Analytics, see Analyzing call center audio with Call\n Analytics.
" + "smithy.api#documentation": "A rule is a set of criteria that you can specify to flag an attribute in your Call\n Analytics output. Rules define a Call Analytics category.
\nRules can include these parameters: , , , and .
\nTo learn more about Call Analytics rules and categories, see Creating categories for batch\n transcriptions and Creating categories for\n streaming transcriptions.
\nTo learn more about Call Analytics, see Analyzing call center audio with Call\n Analytics.
" } }, "com.amazonaws.transcribe#RuleList": { @@ -3928,7 +3916,7 @@ } }, "traits": { - "smithy.api#documentation": "Flag the presence or absence of specific sentiments detected in your Call Analytics\n transcription output.
\nRules using SentimentFilter
are designed to match:
The presence or absence of a positive sentiment felt by the customer, agent,\n or both at specified points in the call
\nThe presence or absence of a negative sentiment felt by the customer, agent,\n or both at specified points in the call
\nThe presence or absence of a neutral sentiment felt by the customer, agent, or\n both at specified points in the call
\nThe presence or absence of a mixed sentiment felt by the customer, the agent,\n or both at specified points in the call
\nSee Rule criteria for batch \n categories for usage examples.
" + "smithy.api#documentation": "Flag the presence or absence of specific sentiments detected in your Call Analytics\n transcription output.
\nRules using SentimentFilter
are designed to match:
The presence or absence of a positive sentiment felt by the customer, agent,\n or both at specified points in the call
\nThe presence or absence of a negative sentiment felt by the customer, agent,\n or both at specified points in the call
\nThe presence or absence of a neutral sentiment felt by the customer, agent, or\n both at specified points in the call
\nThe presence or absence of a mixed sentiment felt by the customer, the agent,\n or both at specified points in the call
\nSee Rule criteria for batch \n categories for usage examples.
" } }, "com.amazonaws.transcribe#SentimentValue": { @@ -3984,43 +3972,43 @@ "ShowSpeakerLabels": { "target": "com.amazonaws.transcribe#Boolean", "traits": { - "smithy.api#documentation": "Enables speaker partitioning (diarization) in your transcription output. Speaker\n partitioning labels the speech from individual speakers in your media file.
\nIf you enable ShowSpeakerLabels
in your request, you must also include\n MaxSpeakerLabels
.
You can't include both ShowSpeakerLabels
and\n ChannelIdentification
in the same request. Including both parameters\n returns a BadRequestException
.
For more information, see Partitioning speakers\n (diarization).
" + "smithy.api#documentation": "Enables speaker partitioning (diarization) in your transcription output. Speaker\n partitioning labels the speech from individual speakers in your media file.
\nIf you enable ShowSpeakerLabels
in your request, you must also include\n MaxSpeakerLabels
.
You can't include both ShowSpeakerLabels
and\n ChannelIdentification
in the same request. Including both parameters\n returns a BadRequestException
.
For more information, see Partitioning speakers\n (diarization).
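    A hedged AWS SDK for Kotlin sketch of a transcription request that enables the speaker partitioning described above, pairing ShowSpeakerLabels with MaxSpeakerLabels (documented next). The job, bucket, and file names are placeholders, and member spellings should be checked against the generated client.

    ```kotlin
    import aws.sdk.kotlin.services.transcribe.TranscribeClient
    import aws.sdk.kotlin.services.transcribe.model.LanguageCode
    import aws.sdk.kotlin.services.transcribe.model.Media
    import aws.sdk.kotlin.services.transcribe.model.Settings
    import aws.sdk.kotlin.services.transcribe.model.StartTranscriptionJobRequest
    import kotlinx.coroutines.runBlocking

    fun main() = runBlocking {
        val request = StartTranscriptionJobRequest {
            transcriptionJobName = "my-first-transcription"   // must be unique within the account
            languageCode = LanguageCode.EnUs
            media = Media { mediaFileUri = "s3://DOC-EXAMPLE-BUCKET/my-input-files/my-media-file.flac" }
            outputBucketName = "DOC-EXAMPLE-BUCKET"
            outputKey = "my-output-files/"                    // sub-folder; default file name matches the job name
            settings = Settings {
                showSpeakerLabels = true   // enable speaker partitioning (diarization)...
                maxSpeakerLabels = 3       // ...and cap the number of partitioned speakers (required together)
            }
        }

        TranscribeClient { region = "us-east-1" }.use { transcribe ->
            val job = transcribe.startTranscriptionJob(request).transcriptionJob
            println("Started ${job?.transcriptionJobName}: ${job?.transcriptionJobStatus}")
        }
    }
    ```
    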
" } }, "MaxSpeakerLabels": { "target": "com.amazonaws.transcribe#MaxSpeakers", "traits": { - "smithy.api#documentation": "Specify the maximum number of speakers you want to partition in your media.
\nNote that if your media contains more speakers than the specified number, multiple\n speakers are treated as a single speaker.
\nIf you specify the MaxSpeakerLabels
field, you must set the\n ShowSpeakerLabels
field to true.
Specify the maximum number of speakers you want to partition in your media.
\nNote that if your media contains more speakers than the specified number, multiple\n speakers are treated as a single speaker.
\nIf you specify the MaxSpeakerLabels
field, you must set the\n ShowSpeakerLabels
field to true.
Enables channel identification in multi-channel audio.
\nChannel identification transcribes the audio on each channel independently, then\n appends the output for each channel into one transcript.
\nYou can't include both ShowSpeakerLabels
and\n ChannelIdentification
in the same request. Including both parameters\n returns a BadRequestException
.
For more information, see Transcribing multi-channel\n audio.
" + "smithy.api#documentation": "Enables channel identification in multi-channel audio.
\nChannel identification transcribes the audio on each channel independently, then\n appends the output for each channel into one transcript.
\nYou can't include both ShowSpeakerLabels
and\n ChannelIdentification
in the same request. Including both parameters\n returns a BadRequestException
.
For more information, see Transcribing multi-channel\n audio.
" } }, "ShowAlternatives": { "target": "com.amazonaws.transcribe#Boolean", "traits": { - "smithy.api#documentation": "To include alternative transcriptions within your transcription output, include\n ShowAlternatives
in your transcription request.
If you have multi-channel audio and do not enable channel identification, your audio\n is transcribed in a continuous manner and your transcript does not separate the speech\n by channel.
\nIf you include ShowAlternatives
, you must also include\n MaxAlternatives
, which is the maximum number of alternative\n transcriptions you want Amazon Transcribe to generate.
For more information, see Alternative\n transcriptions.
" + "smithy.api#documentation": "To include alternative transcriptions within your transcription output, include\n ShowAlternatives
in your transcription request.
If you have multi-channel audio and do not enable channel identification, your audio\n is transcribed in a continuous manner and your transcript does not separate the speech\n by channel.
\nIf you include ShowAlternatives
, you must also include\n MaxAlternatives
, which is the maximum number of alternative\n transcriptions you want Amazon Transcribe to generate.
For more information, see Alternative\n transcriptions.
" } }, "MaxAlternatives": { "target": "com.amazonaws.transcribe#MaxAlternatives", "traits": { - "smithy.api#documentation": "Indicate the maximum number of alternative transcriptions you want Amazon Transcribe\n to include in your transcript.
\nIf you select a number greater than the number of alternative transcriptions generated\n by Amazon Transcribe, only the actual number of alternative transcriptions are\n included.
\nIf you include MaxAlternatives
in your request, you must also include\n ShowAlternatives
with a value of true
.
For more information, see Alternative\n transcriptions.
" + "smithy.api#documentation": "Indicate the maximum number of alternative transcriptions you want Amazon Transcribe\n to include in your transcript.
\nIf you select a number greater than the number of alternative transcriptions generated\n by Amazon Transcribe, only the actual number of alternative transcriptions are\n included.
\nIf you include MaxAlternatives
in your request, you must also include\n ShowAlternatives
with a value of true
.
For more information, see Alternative\n transcriptions.
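    As a small illustration of the ShowAlternatives/MaxAlternatives pairing above, plus the vocabulary filter fields documented next, this hedged Kotlin fragment builds only the Settings structure; the filter name is a placeholder and the enum spelling is assumed.

    ```kotlin
    import aws.sdk.kotlin.services.transcribe.model.Settings
    import aws.sdk.kotlin.services.transcribe.model.VocabularyFilterMethod

    // Settings fragment only; attach it to a StartTranscriptionJobRequest as `settings = ...`.
    val alternativesAndFilterSettings = Settings {
        showAlternatives = true          // request alternative transcriptions...
        maxAlternatives = 2              // ...and cap how many are generated (required together)

        vocabularyFilterName = "my-vocabulary-filter"         // placeholder; the filter must already exist
        vocabularyFilterMethod = VocabularyFilterMethod.Mask  // replace matched words with ***
    }
    ```
    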
" } }, "VocabularyFilterName": { "target": "com.amazonaws.transcribe#VocabularyFilterName", "traits": { - "smithy.api#documentation": "The name of the custom vocabulary filter you want to use in your transcription job\n request. This name is case sensitive, cannot contain spaces, and must be unique within\n an Amazon Web Services account.
\nNote that if you include VocabularyFilterName
in your request, you must\n also include VocabularyFilterMethod
.
The name of the custom vocabulary filter you want to use in your transcription job\n request. This name is case sensitive, cannot contain spaces, and must be unique within\n an Amazon Web Services account.
\nNote that if you include VocabularyFilterName
in your request, you must\n also include VocabularyFilterMethod
.
Specify how you want your custom vocabulary filter applied to your transcript.
\nTo replace words with ***
, choose mask
.
To delete words, choose remove
.
To flag words without changing them, choose tag
.
Specify how you want your custom vocabulary filter applied to your transcript.
\nTo replace words with ***
, choose mask
.
To delete words, choose remove
.
To flag words without changing them, choose tag
.
Transcribes the audio from a customer service call and applies any additional Request\n Parameters you choose to include in your request.
\nIn addition to many standard transcription features, Call Analytics provides you with\n call characteristics, call summarization, speaker sentiment, and optional redaction of\n your text transcript and your audio file. You can also apply custom categories to flag\n specified conditions. To learn more about these features and insights, refer to Analyzing call\n center audio with Call Analytics.
\nIf you want to apply categories to your Call Analytics job, you must create them\n before submitting your job request. Categories cannot be retroactively applied to a job.\n To create a new category, use the \n operation. To learn more about Call Analytics categories, see Creating categories for batch \n transcriptions and Creating categories for \n streaming transcriptions.
\nTo make a StartCallAnalyticsJob
request, you must first upload your media\n file into an Amazon S3 bucket; you can then specify the Amazon S3\n location of the file using the Media
parameter.
You must include the following parameters in your StartCallAnalyticsJob
\n request:
\n region
: The Amazon Web Services Region where you are making your\n request. For a list of Amazon Web Services Regions supported with Amazon Transcribe, refer to Amazon Transcribe endpoints and\n quotas.
\n CallAnalyticsJobName
: A custom name that you create for your\n transcription job that's unique within your Amazon Web Services account.
\n DataAccessRoleArn
: The Amazon Resource Name (ARN) of an IAM role\n that has permissions to access the Amazon S3 bucket that contains your\n input files.
\n Media
(MediaFileUri
or\n RedactedMediaFileUri
): The Amazon S3 location of your\n media file.
With Call Analytics, you can redact the audio contained in your media file by\n including RedactedMediaFileUri
, instead of MediaFileUri
,\n to specify the location of your input audio. If you choose to redact your audio, you\n can find your redacted media at the location specified in the\n RedactedMediaFileUri
field of your response.
Transcribes the audio from a customer service call and applies any additional Request\n Parameters you choose to include in your request.
\nIn addition to many standard transcription features, Call Analytics provides you with\n call characteristics, call summarization, speaker sentiment, and optional redaction of\n your text transcript and your audio file. You can also apply custom categories to flag\n specified conditions. To learn more about these features and insights, refer to Analyzing call\n center audio with Call Analytics.
\nIf you want to apply categories to your Call Analytics job, you must create them\n before submitting your job request. Categories cannot be retroactively applied to a job.\n To create a new category, use the \n operation. To learn more about Call Analytics categories, see Creating categories for batch \n transcriptions and Creating categories for \n streaming transcriptions.
\nTo make a StartCallAnalyticsJob
request, you must first upload your media\n file into an Amazon S3 bucket; you can then specify the Amazon S3\n location of the file using the Media
parameter.
You must include the following parameters in your StartCallAnalyticsJob
\n request:
\n region
: The Amazon Web Services Region where you are making your\n request. For a list of Amazon Web Services Regions supported with Amazon Transcribe, refer to Amazon Transcribe endpoints and\n quotas.
\n CallAnalyticsJobName
: A custom name that you create for your\n transcription job that's unique within your Amazon Web Services account.
\n DataAccessRoleArn
: The Amazon Resource Name (ARN) of an IAM role\n that has permissions to access the Amazon S3 bucket that contains your\n input files.
\n Media
(MediaFileUri
or\n RedactedMediaFileUri
): The Amazon S3 location of your\n media file.
With Call Analytics, you can redact the audio contained in your media file by\n including RedactedMediaFileUri
, instead of MediaFileUri
,\n to specify the location of your input audio. If you choose to redact your audio, you\n can find your redacted media at the location specified in the\n RedactedMediaFileUri
field of your response.
A unique name, chosen by you, for your Call Analytics job.
\nThis name is case sensitive, cannot contain spaces, and must be unique within an\n Amazon Web Services account. If you try to create a new job with the same name as an\n existing job, you get a ConflictException
error.
A unique name, chosen by you, for your Call Analytics job.
\nThis name is case sensitive, cannot contain spaces, and must be unique within an\n Amazon Web Services account. If you try to create a new job with the same name as an\n existing job, you get a ConflictException
error.
The Amazon S3 location where you want your Call Analytics transcription output\n stored. You can use any of the following formats to specify the output location:
\ns3://DOC-EXAMPLE-BUCKET
\ns3://DOC-EXAMPLE-BUCKET/my-output-folder/
\ns3://DOC-EXAMPLE-BUCKET/my-output-folder/my-call-analytics-job.json
\nUnless you specify a file name (option 3), the name of your output file has a default\n value that matches the name you specified for your transcription job using the\n CallAnalyticsJobName
parameter.
You can specify a KMS key to encrypt your output using the\n OutputEncryptionKMSKeyId
parameter. If you don't specify a KMS key, Amazon Transcribe uses the default Amazon S3 key for\n server-side encryption.
If you don't specify OutputLocation
, your transcript is placed in a\n service-managed Amazon S3 bucket and you are provided with a URI to access your\n transcript.
The Amazon S3 location where you want your Call Analytics transcription output\n stored. You can use any of the following formats to specify the output location:
\ns3://DOC-EXAMPLE-BUCKET
\ns3://DOC-EXAMPLE-BUCKET/my-output-folder/
\ns3://DOC-EXAMPLE-BUCKET/my-output-folder/my-call-analytics-job.json
\nUnless you specify a file name (option 3), the name of your output file has a default\n value that matches the name you specified for your transcription job using the\n CallAnalyticsJobName
parameter.
You can specify a KMS key to encrypt your output using the\n OutputEncryptionKMSKeyId
parameter. If you don't specify a KMS key, Amazon Transcribe uses the default Amazon S3 key for\n server-side encryption.
If you don't specify OutputLocation
, your transcript is placed in a\n service-managed Amazon S3 bucket and you are provided with a URI to access your\n transcript.
The KMS key you want to use to encrypt your Call Analytics\n output.
\nIf using a key located in the current\n Amazon Web Services account, you can specify your KMS key in one of four\n ways:
\nUse the KMS key ID itself. For example,\n 1234abcd-12ab-34cd-56ef-1234567890ab
.
Use an alias for the KMS key ID. For example,\n alias/ExampleAlias
.
Use the Amazon Resource Name (ARN) for the KMS key ID. For\n example,\n arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
Use the ARN for the KMS key alias. For example,\n arn:aws:kms:region:account-ID:alias/ExampleAlias
.
If using a key located in a different\n Amazon Web Services account than the current Amazon Web Services account, you can specify\n your KMS key in one of two ways:
\nUse the ARN for the KMS key ID. For example,\n arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
Use the ARN for the KMS key alias. For example,\n arn:aws:kms:region:account-ID:alias/ExampleAlias
.
If you don't specify an encryption key, your output is encrypted with the default\n Amazon S3 key (SSE-S3).
\nIf you specify a KMS key to encrypt your output, you must also specify\n an output location using the OutputLocation
parameter.
Note that the user making the request must\n have permission to use the specified KMS key.
" + "smithy.api#documentation": "The KMS key you want to use to encrypt your Call Analytics\n output.
\nIf using a key located in the current\n Amazon Web Services account, you can specify your KMS key in one of four\n ways:
\nUse the KMS key ID itself. For example,\n 1234abcd-12ab-34cd-56ef-1234567890ab
.
Use an alias for the KMS key ID. For example,\n alias/ExampleAlias
.
Use the Amazon Resource Name (ARN) for the KMS key ID. For\n example,\n arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
Use the ARN for the KMS key alias. For example,\n arn:aws:kms:region:account-ID:alias/ExampleAlias
.
If using a key located in a different\n Amazon Web Services account than the current Amazon Web Services account, you can specify\n your KMS key in one of two ways:
\nUse the ARN for the KMS key ID. For example,\n arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
Use the ARN for the KMS key alias. For example,\n arn:aws:kms:region:account-ID:alias/ExampleAlias
.
If you don't specify an encryption key, your output is encrypted with the default\n Amazon S3 key (SSE-S3).
\nIf you specify a KMS key to encrypt your output, you must also specify\n an output location using the OutputLocation
parameter.
Note that the user making the request must\n have permission to use the specified KMS key.
" } }, "DataAccessRoleArn": { "target": "com.amazonaws.transcribe#DataAccessRoleArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of an IAM role that has permissions to\n access the Amazon S3 bucket that contains your input files. If the role that you\n specify doesn’t have the appropriate permissions to access the specified Amazon S3 location, your request fails.
\nIAM role ARNs have the format\n arn:partition:iam::account:role/role-name-with-path
. For example:\n arn:aws:iam::111122223333:role/Admin
.
For more information, see IAM\n ARNs.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of an IAM role that has permissions to\n access the Amazon S3 bucket that contains your input files. If the role that you\n specify doesn’t have the appropriate permissions to access the specified Amazon S3 location, your request fails.
\nIAM role ARNs have the format\n arn:partition:iam::account:role/role-name-with-path
. For example:\n arn:aws:iam::111122223333:role/Admin
.
For more information, see IAM\n ARNs.
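    Pulling the required Call Analytics parameters above into one place, a hedged AWS SDK for Kotlin sketch; the role ARN and bucket values reuse the documentation's placeholders, and member spellings should be checked against the generated client.

    ```kotlin
    import aws.sdk.kotlin.services.transcribe.TranscribeClient
    import aws.sdk.kotlin.services.transcribe.model.Media
    import aws.sdk.kotlin.services.transcribe.model.StartCallAnalyticsJobRequest
    import kotlinx.coroutines.runBlocking

    fun main() = runBlocking {
        val request = StartCallAnalyticsJobRequest {
            callAnalyticsJobName = "my-first-call-analytics-job"  // unique within the account
            media = Media { mediaFileUri = "s3://DOC-EXAMPLE-BUCKET/my-input-files/call.wav" }
            outputLocation = "s3://DOC-EXAMPLE-BUCKET/my-output-folder/"
            dataAccessRoleArn = "arn:aws:iam::111122223333:role/Admin"  // role able to read the input bucket
        }

        TranscribeClient { region = "us-east-1" }.use { transcribe ->
            val job = transcribe.startCallAnalyticsJob(request).callAnalyticsJob
            println("Started ${job?.callAnalyticsJobName}: ${job?.callAnalyticsJobStatus}")
        }
    }
    ```
    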
" } }, "Settings": { @@ -4154,7 +4142,7 @@ } ], "traits": { - "smithy.api#documentation": "Transcribes the audio from a medical dictation or conversation and applies any\n additional Request Parameters you choose to include in your request.
\nIn addition to many standard transcription features, Amazon Transcribe Medical\n provides you with a robust medical vocabulary and, optionally, content identification,\n which adds flags to personal health information (PHI). To learn more about these\n features, refer to How Amazon Transcribe Medical\n works.
\nTo make a StartMedicalTranscriptionJob
request, you must first upload\n your media file into an Amazon S3 bucket; you can then specify the S3 location\n of the file using the Media
parameter.
You must include the following parameters in your\n StartMedicalTranscriptionJob
request:
\n region
: The Amazon Web Services Region where you are making your\n request. For a list of Amazon Web Services Regions supported with Amazon Transcribe, refer to Amazon Transcribe endpoints and\n quotas.
\n MedicalTranscriptionJobName
: A custom name you create for your\n transcription job that is unique within your Amazon Web Services account.
\n Media
(MediaFileUri
): The Amazon S3 location\n of your media file.
\n LanguageCode
: This must be en-US
.
\n OutputBucketName
: The Amazon S3 bucket where you want\n your transcript stored. If you want your output stored in a sub-folder of this\n bucket, you must also include OutputKey
.
\n Specialty
: This must be PRIMARYCARE
.
\n Type
: Choose whether your audio is a conversation or a\n dictation.
Transcribes the audio from a medical dictation or conversation and applies any\n additional Request Parameters you choose to include in your request.
\nIn addition to many standard transcription features, Amazon Transcribe Medical\n provides you with a robust medical vocabulary and, optionally, content identification,\n which adds flags to personal health information (PHI). To learn more about these\n features, refer to How Amazon Transcribe Medical\n works.
\nTo make a StartMedicalTranscriptionJob
request, you must first upload\n your media file into an Amazon S3 bucket; you can then specify the S3 location\n of the file using the Media
parameter.
You must include the following parameters in your\n StartMedicalTranscriptionJob
request:
\n region
: The Amazon Web Services Region where you are making your\n request. For a list of Amazon Web Services Regions supported with Amazon Transcribe, refer to Amazon Transcribe endpoints and\n quotas.
\n MedicalTranscriptionJobName
: A custom name you create for your\n transcription job that is unique within your Amazon Web Services account.
\n Media
(MediaFileUri
): The Amazon S3 location\n of your media file.
\n LanguageCode
: This must be en-US
.
\n OutputBucketName
: The Amazon S3 bucket where you want\n your transcript stored. If you want your output stored in a sub-folder of this\n bucket, you must also include OutputKey
.
\n Specialty
: This must be PRIMARYCARE
.
\n Type
: Choose whether your audio is a conversation or a\n dictation.
A unique name, chosen by you, for your medical transcription job. The name that you\n specify is also used as the default name of your transcription output file. If you want\n to specify a different name for your transcription output, use the\n OutputKey
parameter.
This name is case sensitive, cannot contain spaces, and must be unique within an\n Amazon Web Services account. If you try to create a new job with the same name as an\n existing job, you get a ConflictException
error.
A unique name, chosen by you, for your medical transcription job. The name that you\n specify is also used as the default name of your transcription output file. If you want\n to specify a different name for your transcription output, use the\n OutputKey
parameter.
This name is case sensitive, cannot contain spaces, and must be unique within an\n Amazon Web Services account. If you try to create a new job with the same name as an\n existing job, you get a ConflictException
error.
The sample rate, in hertz, of the audio track in your input media file.
\nIf you don't specify the media sample rate, Amazon Transcribe Medical determines it\n for you. If you specify the sample rate, it must match the rate detected by Amazon Transcribe Medical; if there's a mismatch between the value that you specify and the\n value detected, your job fails. Therefore, in most cases, it's advised to omit\n MediaSampleRateHertz
and let Amazon Transcribe Medical determine the\n sample rate.
The sample rate, in hertz, of the audio track in your input media file.
\nIf you don't specify the media sample rate, Amazon Transcribe Medical determines it\n for you. If you specify the sample rate, it must match the rate detected by Amazon Transcribe Medical; if there's a mismatch between the value that you specify and the\n value detected, your job fails. Therefore, in most cases, it's advised to omit\n MediaSampleRateHertz
and let Amazon Transcribe Medical determine the\n sample rate.
The name of the Amazon S3 bucket where you want your medical transcription\n output stored. Do not include the S3://
prefix of the specified\n bucket.
If you want your output to go to a sub-folder of this bucket, specify it using the\n OutputKey
parameter; OutputBucketName
only accepts the\n name of a bucket.
For example, if you want your output stored in S3://DOC-EXAMPLE-BUCKET
,\n set OutputBucketName
to DOC-EXAMPLE-BUCKET
. However, if you\n want your output stored in S3://DOC-EXAMPLE-BUCKET/test-files/
, set\n OutputBucketName
to DOC-EXAMPLE-BUCKET
and\n OutputKey
to test-files/
.
Note that Amazon Transcribe must have permission to use the specified location. You\n can change Amazon S3 permissions using the Amazon Web Services Management Console. See also Permissions Required for IAM User Roles.
", + "smithy.api#documentation": "The name of the Amazon S3 bucket where you want your medical transcription\n output stored. Do not include the S3://
prefix of the specified\n bucket.
If you want your output to go to a sub-folder of this bucket, specify it using the\n OutputKey
parameter; OutputBucketName
only accepts the\n name of a bucket.
For example, if you want your output stored in S3://DOC-EXAMPLE-BUCKET
,\n set OutputBucketName
to DOC-EXAMPLE-BUCKET
. However, if you\n want your output stored in S3://DOC-EXAMPLE-BUCKET/test-files/
, set\n OutputBucketName
to DOC-EXAMPLE-BUCKET
and\n OutputKey
to test-files/
.
Note that Amazon Transcribe must have permission to use the specified location. You\n can change Amazon S3 permissions using the Amazon Web Services Management Console. See also Permissions Required for IAM User Roles.
", "smithy.api#required": {} } }, "OutputKey": { "target": "com.amazonaws.transcribe#OutputKey", "traits": { - "smithy.api#documentation": "Use in combination with OutputBucketName
to specify the output location\n of your transcript and, optionally, a unique name for your output file. The default name\n for your transcription output is the same as the name you specified for your medical\n transcription job (MedicalTranscriptionJobName
).
Here are some examples of how you can use OutputKey
:
If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName
and\n 'my-transcript.json' as the OutputKey
, your transcription output\n path is s3://DOC-EXAMPLE-BUCKET/my-transcript.json
.
If you specify 'my-first-transcription' as the\n MedicalTranscriptionJobName
, 'DOC-EXAMPLE-BUCKET' as the\n OutputBucketName
, and 'my-transcript' as the\n OutputKey
, your transcription output path is\n s3://DOC-EXAMPLE-BUCKET/my-transcript/my-first-transcription.json
.
If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName
and\n 'test-files/my-transcript.json' as the OutputKey
, your\n transcription output path is\n s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript.json
.
If you specify 'my-first-transcription' as the\n MedicalTranscriptionJobName
, 'DOC-EXAMPLE-BUCKET' as the\n OutputBucketName
, and 'test-files/my-transcript' as the\n OutputKey
, your transcription output path is\n s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript/my-first-transcription.json
.
If you specify the name of an Amazon S3 bucket sub-folder that doesn't exist,\n one is created for you.
" + "smithy.api#documentation": "Use in combination with OutputBucketName
to specify the output location\n of your transcript and, optionally, a unique name for your output file. The default name\n for your transcription output is the same as the name you specified for your medical\n transcription job (MedicalTranscriptionJobName
).
Here are some examples of how you can use OutputKey
:
If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName
and\n 'my-transcript.json' as the OutputKey
, your transcription output\n path is s3://DOC-EXAMPLE-BUCKET/my-transcript.json
.
If you specify 'my-first-transcription' as the\n MedicalTranscriptionJobName
, 'DOC-EXAMPLE-BUCKET' as the\n OutputBucketName
, and 'my-transcript' as the\n OutputKey
, your transcription output path is\n s3://DOC-EXAMPLE-BUCKET/my-transcript/my-first-transcription.json
.
If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName
and\n 'test-files/my-transcript.json' as the OutputKey
, your\n transcription output path is\n s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript.json
.
If you specify 'my-first-transcription' as the\n MedicalTranscriptionJobName
, 'DOC-EXAMPLE-BUCKET' as the\n OutputBucketName
, and 'test-files/my-transcript' as the\n OutputKey
, your transcription output path is\n s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript/my-first-transcription.json
.
If you specify the name of an Amazon S3 bucket sub-folder that doesn't exist,\n one is created for you.
" } }, "OutputEncryptionKMSKeyId": { "target": "com.amazonaws.transcribe#KMSKeyId", "traits": { - "smithy.api#documentation": "The KMS key you want to use to encrypt your medical transcription\n output.
\nIf using a key located in the current\n Amazon Web Services account, you can specify your KMS key in one of four\n ways:
\nUse the KMS key ID itself. For example,\n 1234abcd-12ab-34cd-56ef-1234567890ab
.
Use an alias for the KMS key ID. For example,\n alias/ExampleAlias
.
Use the Amazon Resource Name (ARN) for the KMS key ID. For\n example,\n arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
Use the ARN for the KMS key alias. For example,\n arn:aws:kms:region:account-ID:alias/ExampleAlias
.
If using a key located in a different\n Amazon Web Services account than the current Amazon Web Services account, you can specify\n your KMS key in one of two ways:
\nUse the ARN for the KMS key ID. For example,\n arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
Use the ARN for the KMS key alias. For example,\n arn:aws:kms:region:account-ID:alias/ExampleAlias
.
If you don't specify an encryption key, your output is encrypted with the default\n Amazon S3 key (SSE-S3).
\nIf you specify a KMS key to encrypt your output, you must also specify\n an output location using the OutputLocation
parameter.
Note that the user making the request must\n have permission to use the specified KMS key.
" + "smithy.api#documentation": "The KMS key you want to use to encrypt your medical transcription\n output.
\nIf using a key located in the current\n Amazon Web Services account, you can specify your KMS key in one of four\n ways:
\nUse the KMS key ID itself. For example,\n 1234abcd-12ab-34cd-56ef-1234567890ab
.
Use an alias for the KMS key ID. For example,\n alias/ExampleAlias
.
Use the Amazon Resource Name (ARN) for the KMS key ID. For\n example,\n arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
Use the ARN for the KMS key alias. For example,\n arn:aws:kms:region:account-ID:alias/ExampleAlias
.
If using a key located in a different\n Amazon Web Services account than the current Amazon Web Services account, you can specify\n your KMS key in one of two ways:
\nUse the ARN for the KMS key ID. For example,\n arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
Use the ARN for the KMS key alias. For example,\n arn:aws:kms:region:account-ID:alias/ExampleAlias
.
If you don't specify an encryption key, your output is encrypted with the default\n Amazon S3 key (SSE-S3).
\nIf you specify a KMS key to encrypt your output, you must also specify\n an output location using the OutputLocation
parameter.
Note that the user making the request must\n have permission to use the specified KMS key.
" } }, "KMSEncryptionContext": { @@ -4245,14 +4233,14 @@ "Type": { "target": "com.amazonaws.transcribe#Type", "traits": { - "smithy.api#documentation": "Specify whether your input media contains only one person (DICTATION
) or\n contains a conversation between two people (CONVERSATION
).
For example, DICTATION
could be used for a medical professional wanting\n to transcribe voice memos; CONVERSATION
could be used for transcribing the\n doctor-patient dialogue during the patient's office visit.
Specify whether your input media contains only one person (DICTATION
) or\n contains a conversation between two people (CONVERSATION
).
For example, DICTATION
could be used for a medical professional wanting\n to transcribe voice memos; CONVERSATION
could be used for transcribing the\n doctor-patient dialogue during the patient's office visit.
Adds one or more custom tags, each in the form of a key:value pair, to a new medical\n transcription job at the time you start this new job.
\nTo learn more about using tags with Amazon Transcribe, refer to Tagging\n resources.
" + "smithy.api#documentation": "Adds one or more custom tags, each in the form of a key:value pair, to a new medical\n transcription job at the time you start this new job.
\nTo learn more about using tags with Amazon Transcribe, refer to Tagging\n resources.
" } } } @@ -4291,7 +4279,7 @@ } ], "traits": { - "smithy.api#documentation": "Transcribes the audio from a media file and applies any additional Request Parameters\n you choose to include in your request.
\nTo make a StartTranscriptionJob
request, you must first upload your media\n file into an Amazon S3 bucket; you can then specify the Amazon S3\n location of the file using the Media
parameter.
You must include the following parameters in your StartTranscriptionJob
\n request:
\n region
: The Amazon Web Services Region where you are making your\n request. For a list of Amazon Web Services Regions supported with Amazon Transcribe, refer to Amazon Transcribe endpoints and\n quotas.
\n TranscriptionJobName
: A custom name you create for your\n transcription job that is unique within your Amazon Web Services account.
\n Media
(MediaFileUri
): The Amazon S3 location\n of your media file.
One of LanguageCode
, IdentifyLanguage
, or\n IdentifyMultipleLanguages
: If you know the language of your\n media file, specify it using the LanguageCode
parameter; you can\n find all valid language codes in the Supported\n languages table. If you don't know the languages spoken in your\n media, use either IdentifyLanguage
or\n IdentifyMultipleLanguages
and let Amazon Transcribe identify\n the languages for you.
Transcribes the audio from a media file and applies any additional Request Parameters\n you choose to include in your request.
\nTo make a StartTranscriptionJob
request, you must first upload your media\n file into an Amazon S3 bucket; you can then specify the Amazon S3\n location of the file using the Media
parameter.
You must include the following parameters in your StartTranscriptionJob
\n request:
\n region
: The Amazon Web Services Region where you are making your\n request. For a list of Amazon Web Services Regions supported with Amazon Transcribe, refer to Amazon Transcribe endpoints and\n quotas.
\n TranscriptionJobName
: A custom name you create for your\n transcription job that is unique within your Amazon Web Services account.
\n Media
(MediaFileUri
): The Amazon S3 location\n of your media file.
One of LanguageCode
, IdentifyLanguage
, or\n IdentifyMultipleLanguages
: If you know the language of your\n media file, specify it using the LanguageCode
parameter; you can\n find all valid language codes in the Supported\n languages table. If you don't know the languages spoken in your\n media, use either IdentifyLanguage
or\n IdentifyMultipleLanguages
and let Amazon Transcribe identify\n the languages for you.
A unique name, chosen by you, for your transcription job. The name that you specify is\n also used as the default name of your transcription output file. If you want to specify\n a different name for your transcription output, use the OutputKey
\n parameter.
This name is case sensitive, cannot contain spaces, and must be unique within an\n Amazon Web Services account. If you try to create a new job with the same name as an\n existing job, you get a ConflictException
error.
A unique name, chosen by you, for your transcription job. The name that you specify is\n also used as the default name of your transcription output file. If you want to specify\n a different name for your transcription output, use the OutputKey
\n parameter.
This name is case sensitive, cannot contain spaces, and must be unique within an\n Amazon Web Services account. If you try to create a new job with the same name as an\n existing job, you get a ConflictException
error.
The language code that represents the language spoken in the input media file.
\nIf you're unsure of the language spoken in your media file, consider using\n IdentifyLanguage
or IdentifyMultipleLanguages
to enable\n automatic language identification.
Note that you must include one of LanguageCode
,\n IdentifyLanguage
, or IdentifyMultipleLanguages
in your\n request. If you include more than one of these parameters, your transcription job\n fails.
For a list of supported languages and their associated language codes, refer to the\n Supported languages table.
\nTo transcribe speech in Modern Standard Arabic (ar-SA
), your media\n file must be encoded at a sample rate of 16,000 Hz or higher.
The language code that represents the language spoken in the input media file.
\nIf you're unsure of the language spoken in your media file, consider using\n IdentifyLanguage
or IdentifyMultipleLanguages
to enable\n automatic language identification.
Note that you must include one of LanguageCode
,\n IdentifyLanguage
, or IdentifyMultipleLanguages
in your\n request. If you include more than one of these parameters, your transcription job\n fails.
For a list of supported languages and their associated language codes, refer to the\n Supported languages table.
\nTo transcribe speech in Modern Standard Arabic (ar-SA
), your media\n file must be encoded at a sample rate of 16,000 Hz or higher.
The sample rate, in hertz, of the audio track in your input media file.
\nIf you don't specify the media sample rate, Amazon Transcribe determines it for you.\n If you specify the sample rate, it must match the rate detected by Amazon Transcribe.\n If there's a mismatch between the value that you specify and the value detected, your\n job fails. In most cases, you can omit MediaSampleRateHertz
and let Amazon Transcribe determine the sample rate.
The sample rate, in hertz, of the audio track in your input media file.
\nIf you don't specify the media sample rate, Amazon Transcribe determines it for you.\n If you specify the sample rate, it must match the rate detected by Amazon Transcribe.\n If there's a mismatch between the value that you specify and the value detected, your\n job fails. In most cases, you can omit MediaSampleRateHertz
and let Amazon Transcribe determine the sample rate.
The name of the Amazon S3 bucket where you want your transcription output\n stored. Do not include the S3://
prefix of the specified bucket.
If you want your output to go to a sub-folder of this bucket, specify it using the\n OutputKey
parameter; OutputBucketName
only accepts the\n name of a bucket.
For example, if you want your output stored in S3://DOC-EXAMPLE-BUCKET
,\n set OutputBucketName
to DOC-EXAMPLE-BUCKET
. However, if you\n want your output stored in S3://DOC-EXAMPLE-BUCKET/test-files/
, set\n OutputBucketName
to DOC-EXAMPLE-BUCKET
and\n OutputKey
to test-files/
.
Note that Amazon Transcribe must have permission to use the specified location. You\n can change Amazon S3 permissions using the Amazon Web Services Management Console. See also Permissions Required for IAM User Roles.
\nIf you don't specify OutputBucketName
, your transcript is placed in a\n service-managed Amazon S3 bucket and you are provided with a URI to access your\n transcript.
The name of the Amazon S3 bucket where you want your transcription output\n stored. Do not include the S3://
prefix of the specified bucket.
If you want your output to go to a sub-folder of this bucket, specify it using the\n OutputKey
parameter; OutputBucketName
only accepts the\n name of a bucket.
For example, if you want your output stored in S3://DOC-EXAMPLE-BUCKET
,\n set OutputBucketName
to DOC-EXAMPLE-BUCKET
. However, if you\n want your output stored in S3://DOC-EXAMPLE-BUCKET/test-files/
, set\n OutputBucketName
to DOC-EXAMPLE-BUCKET
and\n OutputKey
to test-files/
.
Note that Amazon Transcribe must have permission to use the specified location. You\n can change Amazon S3 permissions using the Amazon Web Services Management Console. See also Permissions Required for IAM User Roles.
\nIf you don't specify OutputBucketName
, your transcript is placed in a\n service-managed Amazon S3 bucket and you are provided with a URI to access your\n transcript.
Use in combination with OutputBucketName
to specify the output location\n of your transcript and, optionally, a unique name for your output file. The default name\n for your transcription output is the same as the name you specified for your\n transcription job (TranscriptionJobName
).
Here are some examples of how you can use OutputKey
:
If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName
and\n 'my-transcript.json' as the OutputKey
, your transcription output\n path is s3://DOC-EXAMPLE-BUCKET/my-transcript.json
.
If you specify 'my-first-transcription' as the\n TranscriptionJobName
, 'DOC-EXAMPLE-BUCKET' as the\n OutputBucketName
, and 'my-transcript' as the\n OutputKey
, your transcription output path is\n s3://DOC-EXAMPLE-BUCKET/my-transcript/my-first-transcription.json
.
If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName
and\n 'test-files/my-transcript.json' as the OutputKey
, your\n transcription output path is\n s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript.json
.
If you specify 'my-first-transcription' as the\n TranscriptionJobName
, 'DOC-EXAMPLE-BUCKET' as the\n OutputBucketName
, and 'test-files/my-transcript' as the\n OutputKey
, your transcription output path is\n s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript/my-first-transcription.json
.
If you specify the name of an Amazon S3 bucket sub-folder that doesn't exist,\n one is created for you.
" + "smithy.api#documentation": "Use in combination with OutputBucketName
to specify the output location\n of your transcript and, optionally, a unique name for your output file. The default name\n for your transcription output is the same as the name you specified for your\n transcription job (TranscriptionJobName
).
Here are some examples of how you can use OutputKey
:
If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName
and\n 'my-transcript.json' as the OutputKey
, your transcription output\n path is s3://DOC-EXAMPLE-BUCKET/my-transcript.json
.
If you specify 'my-first-transcription' as the\n TranscriptionJobName
, 'DOC-EXAMPLE-BUCKET' as the\n OutputBucketName
, and 'my-transcript' as the\n OutputKey
, your transcription output path is\n s3://DOC-EXAMPLE-BUCKET/my-transcript/my-first-transcription.json
.
If you specify 'DOC-EXAMPLE-BUCKET' as the OutputBucketName
and\n 'test-files/my-transcript.json' as the OutputKey
, your\n transcription output path is\n s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript.json
.
If you specify 'my-first-transcription' as the\n TranscriptionJobName
, 'DOC-EXAMPLE-BUCKET' as the\n OutputBucketName
, and 'test-files/my-transcript' as the\n OutputKey
, your transcription output path is\n s3://DOC-EXAMPLE-BUCKET/test-files/my-transcript/my-first-transcription.json
.
If you specify the name of an Amazon S3 bucket sub-folder that doesn't exist,\n one is created for you.
" } }, "OutputEncryptionKMSKeyId": { "target": "com.amazonaws.transcribe#KMSKeyId", "traits": { - "smithy.api#documentation": "The KMS key you want to use to encrypt your transcription\n output.
\nIf using a key located in the current\n Amazon Web Services account, you can specify your KMS key in one of four\n ways:
\nUse the KMS key ID itself. For example,\n 1234abcd-12ab-34cd-56ef-1234567890ab
.
Use an alias for the KMS key ID. For example,\n alias/ExampleAlias
.
Use the Amazon Resource Name (ARN) for the KMS key ID. For\n example,\n arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
Use the ARN for the KMS key alias. For example,\n arn:aws:kms:region:account-ID:alias/ExampleAlias
.
If using a key located in a different\n Amazon Web Services account than the current Amazon Web Services account, you can specify\n your KMS key in one of two ways:
\nUse the ARN for the KMS key ID. For example,\n arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
Use the ARN for the KMS key alias. For example,\n arn:aws:kms:region:account-ID:alias/ExampleAlias
.
If you don't specify an encryption key, your output is encrypted with the default\n Amazon S3 key (SSE-S3).
\nIf you specify a KMS key to encrypt your output, you must also specify\n an output location using the OutputLocation
parameter.
Note that the user making the request must\n have permission to use the specified KMS key.
" + "smithy.api#documentation": "The KMS key you want to use to encrypt your transcription\n output.
\nIf using a key located in the current\n Amazon Web Services account, you can specify your KMS key in one of four\n ways:
\nUse the KMS key ID itself. For example,\n 1234abcd-12ab-34cd-56ef-1234567890ab
.
Use an alias for the KMS key ID. For example,\n alias/ExampleAlias
.
Use the Amazon Resource Name (ARN) for the KMS key ID. For\n example,\n arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
Use the ARN for the KMS key alias. For example,\n arn:aws:kms:region:account-ID:alias/ExampleAlias
.
If using a key located in a different\n Amazon Web Services account than the current Amazon Web Services account, you can specify\n your KMS key in one of two ways:
\nUse the ARN for the KMS key ID. For example,\n arn:aws:kms:region:account-ID:key/1234abcd-12ab-34cd-56ef-1234567890ab
.
Use the ARN for the KMS key alias. For example,\n arn:aws:kms:region:account-ID:alias/ExampleAlias
.
If you don't specify an encryption key, your output is encrypted with the default\n Amazon S3 key (SSE-S3).
\nIf you specify a KMS key to encrypt your output, you must also specify\n an output location using the OutputLocation
parameter.
Note that the user making the request must\n have permission to use the specified KMS key.
" } }, "KMSEncryptionContext": { @@ -4362,19 +4350,19 @@ "Settings": { "target": "com.amazonaws.transcribe#Settings", "traits": { - "smithy.api#documentation": "Specify additional optional settings in your request, including channel identification, alternative transcriptions, speaker\n partitioning. You can use that to apply custom vocabularies and vocabulary filters.
\nIf you want to include a custom vocabulary or a custom vocabulary filter (or both)\n with your request but do not want to use automatic\n language identification, use Settings
with the VocabularyName
\n or VocabularyFilterName
(or both) sub-parameter.
If you're using automatic language identification with your request and want to\n include a custom language model, a custom vocabulary, or a custom vocabulary filter, use\n instead the parameter with the\n
LanguageModelName
, VocabularyName
or\n VocabularyFilterName
sub-parameters.
Specify additional optional settings in your request, including channel identification, alternative transcriptions, speaker\n partitioning. You can use that to apply custom vocabularies and vocabulary filters.
\nIf you want to include a custom vocabulary or a custom vocabulary filter (or both)\n with your request but do not want to use automatic\n language identification, use Settings
with the VocabularyName
\n or VocabularyFilterName
(or both) sub-parameter.
If you're using automatic language identification with your request and want to\n include a custom language model, a custom vocabulary, or a custom vocabulary filter, use\n instead the parameter with the\n
LanguageModelName
, VocabularyName
or\n VocabularyFilterName
sub-parameters.
Specify the custom language model you want to include with your transcription job. If\n you include ModelSettings
in your request, you must include the\n LanguageModelName
sub-parameter.
For more information, see Custom language\n models.
" + "smithy.api#documentation": "Specify the custom language model you want to include with your transcription job. If\n you include ModelSettings
in your request, you must include the\n LanguageModelName
sub-parameter.
For more information, see Custom language\n models.
" } }, "JobExecutionSettings": { "target": "com.amazonaws.transcribe#JobExecutionSettings", "traits": { - "smithy.api#documentation": "Makes it possible to control how your transcription job is processed. Currently, the\n only JobExecutionSettings
modification you can choose is enabling job\n queueing using the AllowDeferredExecution
sub-parameter.
If you include JobExecutionSettings
in your request, you must also\n include the sub-parameters: AllowDeferredExecution
and\n DataAccessRoleArn
.
Makes it possible to control how your transcription job is processed. Currently, the\n only JobExecutionSettings
modification you can choose is enabling job\n queueing using the AllowDeferredExecution
sub-parameter.
If you include JobExecutionSettings
in your request, you must also\n include the sub-parameters: AllowDeferredExecution
and\n DataAccessRoleArn
.
Enables automatic language identification in your transcription job request. Use this\n parameter if your media file contains only one language. If your media contains multiple\n languages, use IdentifyMultipleLanguages
instead.
If you include IdentifyLanguage
, you can optionally include a list of\n language codes, using LanguageOptions
, that you think may be present in\n your media file. Including LanguageOptions
restricts\n IdentifyLanguage
to only the language options that you specify, which\n can improve transcription accuracy.
If you want to apply a custom language model, a custom vocabulary, or a custom\n vocabulary filter to your automatic language identification request, include\n LanguageIdSettings
with the relevant sub-parameters\n (VocabularyName
, LanguageModelName
, and\n VocabularyFilterName
). If you include LanguageIdSettings
,\n also include LanguageOptions
.
Note that you must include one of LanguageCode
,\n IdentifyLanguage
, or IdentifyMultipleLanguages
in your\n request. If you include more than one of these parameters, your transcription job\n fails.
Enables automatic language identification in your transcription job request. Use this\n parameter if your media file contains only one language. If your media contains multiple\n languages, use IdentifyMultipleLanguages
instead.
If you include IdentifyLanguage
, you can optionally include a list of\n language codes, using LanguageOptions
, that you think may be present in\n your media file. Including LanguageOptions
restricts\n IdentifyLanguage
to only the language options that you specify, which\n can improve transcription accuracy.
If you want to apply a custom language model, a custom vocabulary, or a custom\n vocabulary filter to your automatic language identification request, include\n LanguageIdSettings
with the relevant sub-parameters\n (VocabularyName
, LanguageModelName
, and\n VocabularyFilterName
). If you include LanguageIdSettings
,\n also include LanguageOptions
.
Note that you must include one of LanguageCode
,\n IdentifyLanguage
, or IdentifyMultipleLanguages
in your\n request. If you include more than one of these parameters, your transcription job\n fails.
Enables automatic multi-language identification in your transcription job request. Use\n this parameter if your media file contains more than one language. If your media\n contains only one language, use IdentifyLanguage
instead.
If you include IdentifyMultipleLanguages
, you can optionally include a\n list of language codes, using LanguageOptions
, that you think may be\n present in your media file. Including LanguageOptions
restricts\n IdentifyLanguage
to only the language options that you specify, which\n can improve transcription accuracy.
If you want to apply a custom vocabulary or a custom vocabulary filter to your\n automatic language identification request, include LanguageIdSettings
with\n the relevant sub-parameters (VocabularyName
and\n VocabularyFilterName
). If you include LanguageIdSettings
,\n also include LanguageOptions
.
Note that you must include one of LanguageCode
,\n IdentifyLanguage
, or IdentifyMultipleLanguages
in your\n request. If you include more than one of these parameters, your transcription job\n fails.
Enables automatic multi-language identification in your transcription job request. Use\n this parameter if your media file contains more than one language. If your media\n contains only one language, use IdentifyLanguage
instead.
If you include IdentifyMultipleLanguages
, you can optionally include a\n list of language codes, using LanguageOptions
, that you think may be\n present in your media file. Including LanguageOptions
restricts\n IdentifyLanguage
to only the language options that you specify, which\n can improve transcription accuracy.
If you want to apply a custom vocabulary or a custom vocabulary filter to your\n automatic language identification request, include LanguageIdSettings
with\n the relevant sub-parameters (VocabularyName
and\n VocabularyFilterName
). If you include LanguageIdSettings
,\n also include LanguageOptions
.
Note that you must include one of LanguageCode
,\n IdentifyLanguage
, or IdentifyMultipleLanguages
in your\n request. If you include more than one of these parameters, your transcription job\n fails.
You can specify two or more language codes that represent the languages you think may\n be present in your media. Including more than five is not recommended. If you're unsure\n what languages are present, do not include this parameter.
\nIf you include LanguageOptions
in your request, you must also include\n IdentifyLanguage
.
For more information, refer to Supported\n languages.
\nTo transcribe speech in Modern Standard Arabic (ar-SA
), your media file\n must be encoded at a sample rate of 16,000 Hz or higher.
You can specify two or more language codes that represent the languages you think may\n be present in your media. Including more than five is not recommended. If you're unsure\n what languages are present, do not include this parameter.
\nIf you include LanguageOptions
in your request, you must also include\n IdentifyLanguage
.
For more information, refer to Supported\n languages.
\nTo transcribe speech in Modern Standard Arabic (ar-SA
), your media file\n must be encoded at a sample rate of 16,000 Hz or higher.
Adds one or more custom tags, each in the form of a key:value pair, to a new\n transcription job at the time you start this new job.
\nTo learn more about using tags with Amazon Transcribe, refer to Tagging\n resources.
" + "smithy.api#documentation": "Adds one or more custom tags, each in the form of a key:value pair, to a new\n transcription job at the time you start this new job.
\nTo learn more about using tags with Amazon Transcribe, refer to Tagging\n resources.
" } }, "LanguageIdSettings": { "target": "com.amazonaws.transcribe#LanguageIdSettingsMap", "traits": { - "smithy.api#documentation": "If using automatic language identification in your request and you want to apply a\n custom language model, a custom vocabulary, or a custom vocabulary filter, include\n LanguageIdSettings
with the relevant sub-parameters\n (VocabularyName
, LanguageModelName
, and\n VocabularyFilterName
). Note that multi-language identification\n (IdentifyMultipleLanguages
) doesn't support custom language\n models.
\n LanguageIdSettings
supports two to five language codes. Each language\n code you include can have an associated custom language model, custom vocabulary, and\n custom vocabulary filter. The language codes that you specify must match the languages\n of the associated custom language models, custom vocabularies, and custom vocabulary\n filters.
It's recommended that you include LanguageOptions
when using\n LanguageIdSettings
to ensure that the correct language dialect is\n identified. For example, if you specify a custom vocabulary that is in\n en-US
but Amazon Transcribe determines that the language spoken in\n your media is en-AU
, your custom vocabulary is not\n applied to your transcription. If you include LanguageOptions
and include\n en-US
as the only English language dialect, your custom vocabulary\n is applied to your transcription.
If you want to include a custom language model with your request but do not want to use automatic language identification, use\n instead the parameter with the\n
LanguageModelName
sub-parameter. If you want to include a custom\n vocabulary or a custom vocabulary filter (or both) with your request but do not want to use automatic language identification, use\n instead the parameter with the\n
VocabularyName
or VocabularyFilterName
(or both)\n sub-parameter.
If using automatic language identification in your request and you want to apply a\n custom language model, a custom vocabulary, or a custom vocabulary filter, include\n LanguageIdSettings
with the relevant sub-parameters\n (VocabularyName
, LanguageModelName
, and\n VocabularyFilterName
). Note that multi-language identification\n (IdentifyMultipleLanguages
) doesn't support custom language\n models.
\n LanguageIdSettings
supports two to five language codes. Each language\n code you include can have an associated custom language model, custom vocabulary, and\n custom vocabulary filter. The language codes that you specify must match the languages\n of the associated custom language models, custom vocabularies, and custom vocabulary\n filters.
It's recommended that you include LanguageOptions
when using\n LanguageIdSettings
to ensure that the correct language dialect is\n identified. For example, if you specify a custom vocabulary that is in\n en-US
but Amazon Transcribe determines that the language spoken in\n your media is en-AU
, your custom vocabulary is not\n applied to your transcription. If you include LanguageOptions
and include\n en-US
as the only English language dialect, your custom vocabulary\n is applied to your transcription.
If you want to include a custom language model with your request but do not want to use automatic language identification, use the\n ModelSettings parameter with the\n
LanguageModelName
sub-parameter. If you want to include a custom\n vocabulary or a custom vocabulary filter (or both) with your request but do not want to use automatic language identification, use the\n Settings parameter with the\n
VocabularyName
or VocabularyFilterName
(or both)\n sub-parameter.
Specify the starting value that is assigned to the first subtitle segment.
\nThe default start index for Amazon Transcribe is 0
, which differs from\n the more widely used standard of 1
. If you're uncertain which value to use,\n we recommend choosing 1
, as this may improve compatibility with other\n services.
Specify the starting value that is assigned to the first subtitle segment.
\nThe default start index for Amazon Transcribe is 0
, which differs from\n the more widely used standard of 1
. If you're uncertain which value to use,\n we recommend choosing 1
, as this may improve compatibility with other\n services.
Generate subtitles for your media file with your transcription request.
\nYou can choose a start index of 0 or 1, and you can specify either WebVTT or SubRip\n (or both) as your output format.
\nNote that your subtitle files are placed in the same location as your transcription\n output.
" + "smithy.api#documentation": "Generate subtitles for your media file with your transcription request.
\nYou can choose a start index of 0 or 1, and you can specify either WebVTT or SubRip\n (or both) as your output format.
\nNote that your subtitle files are placed in the same location as your transcription\n output.
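For illustration, a rough, untested sketch of such a request using the AWS SDK for Kotlin generated from these models; the job name, bucket, language, and region are placeholder assumptions.

import aws.sdk.kotlin.services.transcribe.TranscribeClient
import aws.sdk.kotlin.services.transcribe.model.LanguageCode
import aws.sdk.kotlin.services.transcribe.model.Media
import aws.sdk.kotlin.services.transcribe.model.StartTranscriptionJobRequest
import aws.sdk.kotlin.services.transcribe.model.SubtitleFormat
import aws.sdk.kotlin.services.transcribe.model.Subtitles

suspend fun startJobWithSubtitles() {
    val request = StartTranscriptionJobRequest {
        transcriptionJobName = "my-subtitled-job"                      // example name
        languageCode = LanguageCode.EnUs
        media = Media { mediaFileUri = "s3://DOC-EXAMPLE-BUCKET/my-media.mp4" }
        subtitles = Subtitles {
            formats = listOf(SubtitleFormat.Vtt, SubtitleFormat.Srt)   // request both WebVTT and SubRip
            outputStartIndex = 1                                       // start numbering at 1
        }
    }
    TranscribeClient.fromEnvironment { region = "us-west-2" }.use { client ->
        client.startTranscriptionJob(request)
    }
}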
" } }, "com.amazonaws.transcribe#SubtitlesOutput": { @@ -4516,7 +4504,7 @@ "SubtitleFileUris": { "target": "com.amazonaws.transcribe#SubtitleFileUris", "traits": { - "smithy.api#documentation": "The Amazon S3 location of your transcript. You can use this URI to access or\n download your subtitle file. Your subtitle file is stored in the same location as your\n transcript. If you specified both WebVTT and SubRip subtitle formats, two URIs are\n provided.
\nIf you included OutputBucketName
in your transcription job request, this\n is the URI of that bucket. If you also included OutputKey
in your request,\n your output is located in the path you specified in your request.
If you didn't include OutputBucketName
in your transcription job request,\n your subtitle file is stored in a service-managed bucket, and\n TranscriptFileUri
provides you with a temporary URI you can use for\n secure access to your subtitle file.
Temporary URIs for service-managed Amazon S3 buckets are only valid for 15\n minutes. If you get an AccesDenied
error, you can get a new temporary\n URI by running a GetTranscriptionJob
or\n ListTranscriptionJob
request.
The Amazon S3 location of your transcript. You can use this URI to access or\n download your subtitle file. Your subtitle file is stored in the same location as your\n transcript. If you specified both WebVTT and SubRip subtitle formats, two URIs are\n provided.
\nIf you included OutputBucketName
in your transcription job request, this\n is the URI of that bucket. If you also included OutputKey
in your request,\n your output is located in the path you specified in your request.
If you didn't include OutputBucketName
in your transcription job request,\n your subtitle file is stored in a service-managed bucket, and\n TranscriptFileUri
provides you with a temporary URI you can use for\n secure access to your subtitle file.
Temporary URIs for service-managed Amazon S3 buckets are only valid for 15\n minutes. If you get an AccesDenied
error, you can get a new temporary\n URI by running a GetTranscriptionJob
or\n ListTranscriptionJob
request.
The second part of a key:value pair that forms a tag associated with a given resource.\n For example, in the tag Department:Sales
, the value is 'Sales'.
Note that you can set the value of a tag to an empty string, but you can't set the\n value of a tag to null. Omitting the tag value is the same as using an empty\n string.
", + "smithy.api#documentation": "The second part of a key:value pair that forms a tag associated with a given resource.\n For example, in the tag Department:Sales
, the value is 'Sales'.
Note that you can set the value of a tag to an empty string, but you can't set the\n value of a tag to null. Omitting the tag value is the same as using an empty\n string.
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "Adds metadata, in the form of a key:value pair, to the specified resource.
\nFor example, you could add the tag Department:Sales
to a resource to\n indicate that it pertains to your organization's sales department. You can also use tags\n for tag-based access control.
To learn more about tagging, see Tagging resources.
" + "smithy.api#documentation": "Adds metadata, in the form of a key:value pair, to the specified resource.
\nFor example, you could add the tag Department:Sales
to a resource to\n indicate that it pertains to your organization's sales department. You can also use tags\n for tag-based access control.
To learn more about tagging, see Tagging resources.
" } }, "com.amazonaws.transcribe#TagKey": { @@ -4611,7 +4599,7 @@ } ], "traits": { - "smithy.api#documentation": "Adds one or more custom tags, each in the form of a key:value pair, to the specified\n resource.
\nTo learn more about using tags with Amazon Transcribe, refer to Tagging\n resources.
", + "smithy.api#documentation": "Adds one or more custom tags, each in the form of a key:value pair, to the specified\n resource.
\nTo learn more about using tags with Amazon Transcribe, refer to Tagging\n resources.
", "smithy.api#http": { "method": "PUT", "uri": "/tags/{ResourceArn}", @@ -4625,7 +4613,7 @@ "ResourceArn": { "target": "com.amazonaws.transcribe#TranscribeArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource you want to tag. ARNs have the format\n arn:partition:service:region:account-id:resource-type/resource-id
.
For example,\n arn:aws:transcribe:us-west-2:111122223333:transcription-job/transcription-job-name
.
Valid values for resource-type
are: transcription-job
,\n medical-transcription-job
, vocabulary
,\n medical-vocabulary
, vocabulary-filter
, and\n language-model
.
The Amazon Resource Name (ARN) of the resource you want to tag. ARNs have the format\n arn:partition:service:region:account-id:resource-type/resource-id
.
For example,\n arn:aws:transcribe:us-west-2:111122223333:transcription-job/transcription-job-name
.
Valid values for resource-type
are: transcription-job
,\n medical-transcription-job
, vocabulary
,\n medical-vocabulary
, vocabulary-filter
, and\n language-model
.
Adds one or more custom tags, each in the form of a key:value pair, to the specified\n resource.
\nTo learn more about using tags with Amazon Transcribe, refer to Tagging\n resources.
", + "smithy.api#documentation": "Adds one or more custom tags, each in the form of a key:value pair, to the specified\n resource.
\nTo learn more about using tags with Amazon Transcribe, refer to Tagging\n resources.
", "smithy.api#required": {} } } @@ -4795,7 +4783,7 @@ "name": "transcribe" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "Amazon Transcribe offers three main types of batch transcription: Standard, Medical, and\n Call Analytics.
\n\n Standard transcriptions are the most common\n option. Refer to for details.
\n\n Medical transcriptions are tailored to\n medical professionals and incorporate medical terms. A common use case for this\n service is transcribing doctor-patient dialogue into after-visit notes. Refer to\n for details.
\n\n Call Analytics transcriptions are designed\n for use with call center audio on two different channels; if you're looking for\n insight into customer service calls, use this option. Refer to for details.
\nAmazon Transcribe offers three main types of batch transcription: Standard, Medical, and\n Call Analytics.
\n\n Standard transcriptions are the most common\n option. Refer to for details.
\n\n Medical transcriptions are tailored to\n medical professionals and incorporate medical terms. A common use case for this\n service is transcribing doctor-patient dialogue into after-visit notes. Refer to\n for details.
\n\n Call Analytics transcriptions are designed\n for use with call center audio on two different channels; if you're looking for\n insight into customer service calls, use this option. Refer to for details.
\nThe Amazon S3 location of your transcript. You can use this URI to access or\n download your transcript.
\nIf you included OutputBucketName
in your transcription job request, this\n is the URI of that bucket. If you also included OutputKey
in your request,\n your output is located in the path you specified in your request.
If you didn't include OutputBucketName
in your transcription job request,\n your transcript is stored in a service-managed bucket, and\n TranscriptFileUri
provides you with a temporary URI you can use for\n secure access to your transcript.
Temporary URIs for service-managed Amazon S3 buckets are only valid for 15\n minutes. If you get an AccesDenied
error, you can get a new temporary\n URI by running a GetTranscriptionJob
or\n ListTranscriptionJob
request.
The Amazon S3 location of your transcript. You can use this URI to access or\n download your transcript.
\nIf you included OutputBucketName
in your transcription job request, this\n is the URI of that bucket. If you also included OutputKey
in your request,\n your output is located in the path you specified in your request.
If you didn't include OutputBucketName
in your transcription job request,\n your transcript is stored in a service-managed bucket, and\n TranscriptFileUri
provides you with a temporary URI you can use for\n secure access to your transcript.
Temporary URIs for service-managed Amazon S3 buckets are only valid for 15\n minutes. If you get an AccesDenied
error, you can get a new temporary\n URI by running a GetTranscriptionJob
or\n ListTranscriptionJob
request.
The Amazon S3 location of your redacted transcript. You can use this URI to\n access or download your transcript.
\nIf you included OutputBucketName
in your transcription job request, this\n is the URI of that bucket. If you also included OutputKey
in your request,\n your output is located in the path you specified in your request.
If you didn't include OutputBucketName
in your transcription job request,\n your transcript is stored in a service-managed bucket, and\n RedactedTranscriptFileUri
provides you with a temporary URI you can use\n for secure access to your transcript.
Temporary URIs for service-managed Amazon S3 buckets are only valid for 15\n minutes. If you get an AccesDenied
error, you can get a new temporary\n URI by running a GetTranscriptionJob
or\n ListTranscriptionJob
request.
The Amazon S3 location of your redacted transcript. You can use this URI to\n access or download your transcript.
\nIf you included OutputBucketName
in your transcription job request, this\n is the URI of that bucket. If you also included OutputKey
in your request,\n your output is located in the path you specified in your request.
If you didn't include OutputBucketName
in your transcription job request,\n your transcript is stored in a service-managed bucket, and\n RedactedTranscriptFileUri
provides you with a temporary URI you can use\n for secure access to your transcript.
Temporary URIs for service-managed Amazon S3 buckets are only valid for 15\n minutes. If you get an AccesDenied
error, you can get a new temporary\n URI by running a GetTranscriptionJob
or\n ListTranscriptionJob
request.
Flag the presence or absence of an exact match to the phrases that you specify. For\n example, if you specify the phrase \"speak to a manager\" as your Targets
\n value, only that exact phrase is flagged.
Note that semantic matching is not supported. For example, if your customer says\n \"speak to the manager\", instead of \"speak to a\n manager\", your content is not flagged.
", + "smithy.api#documentation": "Flag the presence or absence of an exact match to the phrases that you specify. For\n example, if you specify the phrase \"speak to a manager\" as your Targets
\n value, only that exact phrase is flagged.
Note that semantic matching is not supported. For example, if your customer says\n \"speak to the manager\", instead of \"speak to a\n manager\", your content is not flagged.
", "smithy.api#required": {} } }, @@ -6559,7 +6556,7 @@ } }, "traits": { - "smithy.api#documentation": "Flag the presence or absence of specific words or phrases detected in your Call\n Analytics transcription output.
\nRules using TranscriptFilter
are designed to match:
Custom words or phrases spoken by the agent, the customer, or both
\nCustom words or phrases not spoken by the\n agent, the customer, or either
\nCustom words or phrases that occur at a specific time frame
\nSee Rule criteria for batch \n categories and Rule criteria for \n streaming categories for usage examples.
" + "smithy.api#documentation": "Flag the presence or absence of specific words or phrases detected in your Call\n Analytics transcription output.
\nRules using TranscriptFilter
are designed to match:
Custom words or phrases spoken by the agent, the customer, or both
\nCustom words or phrases not spoken by the\n agent, the customer, or either
\nCustom words or phrases that occur at a specific time frame
\nSee Rule criteria for batch \n categories and Rule criteria for \n streaming categories for usage examples.
" } }, "com.amazonaws.transcribe#TranscriptFilterType": { @@ -6585,7 +6582,7 @@ "TranscriptionJobStatus": { "target": "com.amazonaws.transcribe#TranscriptionJobStatus", "traits": { - "smithy.api#documentation": "Provides the status of the specified transcription job.
\nIf the status is COMPLETED
, the job is finished and you can find the\n results at the location specified in TranscriptFileUri
(or\n RedactedTranscriptFileUri
, if you requested transcript redaction). If\n the status is FAILED
, FailureReason
provides details on why\n your transcription job failed.
Provides the status of the specified transcription job.
\nIf the status is COMPLETED
, the job is finished and you can find the\n results at the location specified in TranscriptFileUri
(or\n RedactedTranscriptFileUri
, if you requested transcript redaction). If\n the status is FAILED
, FailureReason
provides details on why\n your transcription job failed.
The date and time the specified transcription job began processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.789000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified transcription job began processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.789000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified transcription job request was made.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified transcription job request was made.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified transcription job finished processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:33:13.922000-07:00
represents a transcription job\n that started processing at 12:33 PM UTC-7 on May 4, 2022.
The date and time the specified transcription job finished processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:33:13.922000-07:00
represents a transcription job\n that started processing at 12:33 PM UTC-7 on May 4, 2022.
If TranscriptionJobStatus
is FAILED
,\n FailureReason
contains information about why the transcription job\n request failed.
The FailureReason
field contains one of the following values:
\n Unsupported media format
.
The media format specified in MediaFormat
isn't valid. Refer to\n MediaFormat for a list of supported\n formats.
\n The media format provided does not match the detected media\n format
.
The media format specified in MediaFormat
doesn't match the\n format of the input file. Check the media format of your media file and correct\n the specified value.
\n Invalid sample rate for audio file
.
The sample rate specified in MediaSampleRateHertz
isn't valid.\n The sample rate must be between 8,000 and 48,000 hertz.
\n The sample rate provided does not match the detected sample\n rate
.
The sample rate specified in MediaSampleRateHertz
doesn't match\n the sample rate detected in your input media file. Check the sample rate of your\n media file and correct the specified value.
\n Invalid file size: file size too large
.
The size of your media file is larger than what Amazon Transcribe can\n process. For more information, refer to Guidelines and\n quotas.
\n\n Invalid number of channels: number of channels too large
.
Your audio contains more channels than Amazon Transcribe is able to process.\n For more information, refer to Guidelines and\n quotas.
\nIf TranscriptionJobStatus
is FAILED
,\n FailureReason
contains information about why the transcription job\n request failed.
The FailureReason
field contains one of the following values:
\n Unsupported media format
.
The media format specified in MediaFormat
isn't valid. Refer to\n MediaFormat for a list of supported\n formats.
\n The media format provided does not match the detected media\n format
.
The media format specified in MediaFormat
doesn't match the\n format of the input file. Check the media format of your media file and correct\n the specified value.
\n Invalid sample rate for audio file
.
The sample rate specified in MediaSampleRateHertz
isn't valid.\n The sample rate must be between 8,000 and 48,000 hertz.
\n The sample rate provided does not match the detected sample\n rate
.
The sample rate specified in MediaSampleRateHertz
doesn't match\n the sample rate detected in your input media file. Check the sample rate of your\n media file and correct the specified value.
\n Invalid file size: file size too large
.
The size of your media file is larger than what Amazon Transcribe can\n process. For more information, refer to Guidelines and\n quotas.
\n\n Invalid number of channels: number of channels too large
.
Your audio contains more channels than Amazon Transcribe is able to process.\n For more information, refer to Guidelines and\n quotas.
\nThe confidence score associated with the language identified in your media\n file.
\nConfidence scores are values between 0 and 1; a larger value indicates a higher\n probability that the identified language correctly matches the language spoken in your\n media.
" + "smithy.api#documentation": "The confidence score associated with the language identified in your media\n file.
\nConfidence scores are values between 0 and 1; a larger value indicates a higher\n probability that the identified language correctly matches the language spoken in your\n media.
" } }, "LanguageCodes": { @@ -6716,7 +6713,7 @@ } }, "traits": { - "smithy.api#documentation": "Provides detailed information about a transcription job.
\nTo view the status of the specified transcription job, check the\n TranscriptionJobStatus
field. If the status is COMPLETED
,\n the job is finished and you can find the results at the location specified in\n TranscriptFileUri
. If the status is FAILED
,\n FailureReason
provides details on why your transcription job\n failed.
If you enabled content redaction, the redacted transcript can be found at the location\n specified in RedactedTranscriptFileUri
.
Provides detailed information about a transcription job.
\nTo view the status of the specified transcription job, check the\n TranscriptionJobStatus
field. If the status is COMPLETED
,\n the job is finished and you can find the results at the location specified in\n TranscriptFileUri
. If the status is FAILED
,\n FailureReason
provides details on why your transcription job\n failed.
If you enabled content redaction, the redacted transcript can be found at the location\n specified in RedactedTranscriptFileUri
.
The date and time the specified transcription job request was made.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified transcription job request was made.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time your transcription job began processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.789000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time your transcription job began processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.789000-07:00
represents a transcription job\n that started processing at 12:32 PM UTC-7 on May 4, 2022.
The date and time the specified transcription job finished processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:33:13.922000-07:00
represents a transcription job\n that started processing at 12:33 PM UTC-7 on May 4, 2022.
The date and time the specified transcription job finished processing.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:33:13.922000-07:00
represents a transcription job\n that started processing at 12:33 PM UTC-7 on May 4, 2022.
Provides the status of your transcription job.
\nIf the status is COMPLETED
, the job is finished and you can find the\n results at the location specified in TranscriptFileUri
(or\n RedactedTranscriptFileUri
, if you requested transcript redaction). If\n the status is FAILED
, FailureReason
provides details on why\n your transcription job failed.
Provides the status of your transcription job.
\nIf the status is COMPLETED
, the job is finished and you can find the\n results at the location specified in TranscriptFileUri
(or\n RedactedTranscriptFileUri
, if you requested transcript redaction). If\n the status is FAILED
, FailureReason
provides details on why\n your transcription job failed.
Indicates where the specified transcription output is stored.
\nIf the value is CUSTOMER_BUCKET
, the location is the Amazon S3\n bucket you specified using the OutputBucketName
parameter in your request. If you also included\n OutputKey
in your request, your output is located in the path you\n specified in your request.
If the value is SERVICE_BUCKET
, the location is a service-managed Amazon S3 bucket. To access a transcript stored in a service-managed bucket, use the\n URI shown in the TranscriptFileUri
or\n RedactedTranscriptFileUri
field.
Indicates where the specified transcription output is stored.
\nIf the value is CUSTOMER_BUCKET
, the location is the Amazon S3\n bucket you specified using the OutputBucketName
parameter in your request. If you also included\n OutputKey
in your request, your output is located in the path you\n specified in your request.
If the value is SERVICE_BUCKET
, the location is a service-managed Amazon S3 bucket. To access a transcript stored in a service-managed bucket, use the\n URI shown in the TranscriptFileUri
or\n RedactedTranscriptFileUri
field.
The confidence score associated with the language identified in your media\n file.
\nConfidence scores are values between 0 and 1; a larger value indicates a higher\n probability that the identified language correctly matches the language spoken in your\n media.
" + "smithy.api#documentation": "The confidence score associated with the language identified in your media\n file.
\nConfidence scores are values between 0 and 1; a larger value indicates a higher\n probability that the identified language correctly matches the language spoken in your\n media.
" } }, "LanguageCodes": { @@ -6896,7 +6893,7 @@ } ], "traits": { - "smithy.api#documentation": "Removes the specified tags from the specified Amazon Transcribe resource.
\nIf you include UntagResource
in your request, you must also include\n ResourceArn
and TagKeys
.
Removes the specified tags from the specified Amazon Transcribe resource.
\nIf you include UntagResource
in your request, you must also include\n ResourceArn
and TagKeys
.
The Amazon Resource Name (ARN) of the Amazon Transcribe resource you want to remove\n tags from. ARNs have the format\n arn:partition:service:region:account-id:resource-type/resource-id
.
For example,\n arn:aws:transcribe:us-west-2:111122223333:transcription-job/transcription-job-name
.
Valid values for resource-type
are: transcription-job
,\n medical-transcription-job
, vocabulary
,\n medical-vocabulary
, vocabulary-filter
, and\n language-model
.
The Amazon Resource Name (ARN) of the Amazon Transcribe resource you want to remove\n tags from. ARNs have the format\n arn:partition:service:region:account-id:resource-type/resource-id
.
For example,\n arn:aws:transcribe:us-west-2:111122223333:transcription-job/transcription-job-name
.
Valid values for resource-type
are: transcription-job
,\n medical-transcription-job
, vocabulary
,\n medical-vocabulary
, vocabulary-filter
, and\n language-model
.
Updates the specified Call Analytics category with new rules. Note that the\n UpdateCallAnalyticsCategory
operation overwrites all existing rules\n contained in the specified category. You cannot append additional rules onto an existing\n category.
To create a new category, see .
", + "smithy.api#documentation": "Updates the specified Call Analytics category with new rules. Note that the\n UpdateCallAnalyticsCategory
operation overwrites all existing rules\n contained in the specified category. You cannot append additional rules onto an existing\n category.
To create a new category, see .
", "smithy.api#http": { "method": "PATCH", "uri": "/callanalyticscategories/{CategoryName}", @@ -7054,7 +7051,7 @@ "VocabularyFileUri": { "target": "com.amazonaws.transcribe#Uri", "traits": { - "smithy.api#documentation": "The Amazon S3 location of the text file that contains your custom medical\n vocabulary. The URI must be located in the same Amazon Web Services Region as the\n resource you're calling.
\nHere's an example URI path:\n s3://DOC-EXAMPLE-BUCKET/my-vocab-file.txt
\n
The Amazon S3 location of the text file that contains your custom medical\n vocabulary. The URI must be located in the same Amazon Web Services Region as the\n resource you're calling.
\nHere's an example URI path:\n s3://DOC-EXAMPLE-BUCKET/my-vocab-file.txt
\n
The date and time the specified custom medical vocabulary was last updated.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The date and time the specified custom medical vocabulary was last updated.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
Use this parameter if you want to update your custom vocabulary filter by including\n all desired terms, as comma-separated values, within your request. The other option for\n updating your vocabulary filter is to save your entries in a text file and upload them\n to an Amazon S3 bucket, then specify the location of your file using the\n VocabularyFilterFileUri
parameter.
Note that if you include Words
in your request, you cannot use\n VocabularyFilterFileUri
; you must choose one or the other.
Each language has a character set that contains all allowed characters for that\n specific language. If you use unsupported characters, your custom vocabulary filter\n request fails. Refer to Character Sets for Custom\n Vocabularies to get the character set for your language.
" + "smithy.api#documentation": "Use this parameter if you want to update your custom vocabulary filter by including\n all desired terms, as comma-separated values, within your request. The other option for\n updating your vocabulary filter is to save your entries in a text file and upload them\n to an Amazon S3 bucket, then specify the location of your file using the\n VocabularyFilterFileUri
parameter.
Note that if you include Words
in your request, you cannot use\n VocabularyFilterFileUri
; you must choose one or the other.
Each language has a character set that contains all allowed characters for that\n specific language. If you use unsupported characters, your custom vocabulary filter\n request fails. Refer to Character Sets for Custom\n Vocabularies to get the character set for your language.
" } }, "VocabularyFilterFileUri": { "target": "com.amazonaws.transcribe#Uri", "traits": { - "smithy.api#documentation": "The Amazon S3 location of the text file that contains your custom vocabulary\n filter terms. The URI must be located in the same Amazon Web Services Region as the\n resource you're calling.
\nHere's an example URI path:\n s3://DOC-EXAMPLE-BUCKET/my-vocab-filter-file.txt
\n
Note that if you include VocabularyFilterFileUri
in your request, you\n cannot use Words
; you must choose one or the other.
The Amazon S3 location of the text file that contains your custom vocabulary\n filter terms. The URI must be located in the same Amazon Web Services Region as the\n resource you're calling.
\nHere's an example URI path:\n s3://DOC-EXAMPLE-BUCKET/my-vocab-filter-file.txt
\n
Note that if you include VocabularyFilterFileUri
in your request, you\n cannot use Words
; you must choose one or the other.
The date and time the specified custom vocabulary filter was last updated.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The date and time the specified custom vocabulary filter was last updated.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The language code that represents the language of the entries in the custom vocabulary\n you want to update. Each custom vocabulary must contain terms in only one\n language.
\nA custom vocabulary can only be used to transcribe files in the same language as the\n custom vocabulary. For example, if you create a custom vocabulary using US English\n (en-US
), you can only apply this custom vocabulary to files that\n contain English audio.
For a list of supported languages and their associated language codes, refer to the\n Supported languages table.
", + "smithy.api#documentation": "The language code that represents the language of the entries in the custom vocabulary\n you want to update. Each custom vocabulary must contain terms in only one\n language.
\nA custom vocabulary can only be used to transcribe files in the same language as the\n custom vocabulary. For example, if you create a custom vocabulary using US English\n (en-US
), you can only apply this custom vocabulary to files that\n contain English audio.
For a list of supported languages and their associated language codes, refer to the\n Supported languages table.
", "smithy.api#required": {} } }, "Phrases": { "target": "com.amazonaws.transcribe#Phrases", "traits": { - "smithy.api#documentation": "Use this parameter if you want to update your custom vocabulary by including all\n desired terms, as comma-separated values, within your request. The other option for\n updating your custom vocabulary is to save your entries in a text file and upload them\n to an Amazon S3 bucket, then specify the location of your file using the\n VocabularyFileUri
parameter.
Note that if you include Phrases
in your request, you cannot use\n VocabularyFileUri
; you must choose one or the other.
Each language has a character set that contains all allowed characters for that\n specific language. If you use unsupported characters, your custom vocabulary filter\n request fails. Refer to Character Sets for Custom\n Vocabularies to get the character set for your language.
" + "smithy.api#documentation": "Use this parameter if you want to update your custom vocabulary by including all\n desired terms, as comma-separated values, within your request. The other option for\n updating your custom vocabulary is to save your entries in a text file and upload them\n to an Amazon S3 bucket, then specify the location of your file using the\n VocabularyFileUri
parameter.
Note that if you include Phrases
in your request, you cannot use\n VocabularyFileUri
; you must choose one or the other.
Each language has a character set that contains all allowed characters for that\n specific language. If you use unsupported characters, your custom vocabulary filter\n request fails. Refer to Character Sets for Custom\n Vocabularies to get the character set for your language.
" } }, "VocabularyFileUri": { "target": "com.amazonaws.transcribe#Uri", "traits": { - "smithy.api#documentation": "The Amazon S3 location of the text file that contains your custom vocabulary.\n The URI must be located in the same Amazon Web Services Region as the resource you're\n calling.
\nHere's an example URI path:\n s3://DOC-EXAMPLE-BUCKET/my-vocab-file.txt
\n
Note that if you include VocabularyFileUri
in your request, you cannot\n use the Phrases
flag; you must choose one or the other.
The Amazon S3 location of the text file that contains your custom vocabulary.\n The URI must be located in the same Amazon Web Services Region as the resource you're\n calling.
\nHere's an example URI path:\n s3://DOC-EXAMPLE-BUCKET/my-vocab-file.txt
\n
Note that if you include VocabularyFileUri
in your request, you cannot\n use the Phrases
flag; you must choose one or the other.
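For illustration, a rough AWS SDK for Kotlin sketch of the file-based variant described above; the vocabulary name, bucket, and region are placeholder examples.

import aws.sdk.kotlin.services.transcribe.TranscribeClient
import aws.sdk.kotlin.services.transcribe.model.LanguageCode
import aws.sdk.kotlin.services.transcribe.model.UpdateVocabularyRequest

suspend fun updateVocabularyFromFile() {
    val request = UpdateVocabularyRequest {
        vocabularyName = "my-custom-vocabulary"                          // example name
        languageCode = LanguageCode.EnUs
        vocabularyFileUri = "s3://DOC-EXAMPLE-BUCKET/my-vocab-file.txt"
        // Phrases is deliberately omitted: VocabularyFileUri and Phrases are mutually exclusive.
    }
    TranscribeClient.fromEnvironment { region = "us-west-2" }.use { client ->
        client.updateVocabulary(request)
    }
}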
The date and time the specified custom vocabulary was last updated.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The date and time the specified custom vocabulary was last updated.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The language code that represents the language of the entries in your vocabulary\n filter. Each custom vocabulary filter must contain terms in only one language.
\nA custom vocabulary filter can only be used to transcribe files in the same language\n as the filter. For example, if you create a custom vocabulary filter using US English\n (en-US
), you can only apply this filter to files that contain English\n audio.
For a list of supported languages and their associated language codes, refer to the\n Supported languages table.
" + "smithy.api#documentation": "The language code that represents the language of the entries in your vocabulary\n filter. Each custom vocabulary filter must contain terms in only one language.
\nA custom vocabulary filter can only be used to transcribe files in the same language\n as the filter. For example, if you create a custom vocabulary filter using US English\n (en-US
), you can only apply this filter to files that contain English\n audio.
For a list of supported languages and their associated language codes, refer to the\n Supported languages table.
" } }, "LastModifiedTime": { "target": "com.amazonaws.transcribe#DateTime", "traits": { - "smithy.api#documentation": "The date and time the specified custom vocabulary filter was last modified.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The date and time the specified custom vocabulary filter was last modified.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The language code used to create your custom vocabulary. Each custom vocabulary must\n contain terms in only one language.
\nA custom vocabulary can only be used to transcribe files in the same language as the\n custom vocabulary. For example, if you create a custom vocabulary using US English\n (en-US
), you can only apply this custom vocabulary to files that\n contain English audio.
The language code used to create your custom vocabulary. Each custom vocabulary must\n contain terms in only one language.
\nA custom vocabulary can only be used to transcribe files in the same language as the\n custom vocabulary. For example, if you create a custom vocabulary using US English\n (en-US
), you can only apply this custom vocabulary to files that\n contain English audio.
The date and time the specified custom vocabulary was last modified.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The date and time the specified custom vocabulary was last modified.
\nTimestamps are in the format YYYY-MM-DD'T'HH:MM:SS.SSSSSS-UTC
. For\n example, 2022-05-04T12:32:58.761000-07:00
represents 12:32 PM UTC-7 on May\n 4, 2022.
The algorithm that is used to encrypt the file.
" + "smithy.api#documentation": "The algorithm that is used to encrypt the file.
\nYou can only specify NONE
if the URL for your connector uses HTTPS. This ensures that\n no traffic is sent in clear text.
The signing algorithm for the MDN response.
\nIf set to DEFAULT (or not set at all), the value for SigningAlogorithm
is used.
The signing algorithm for the MDN response.
\nIf set to DEFAULT (or not set at all), the value for SigningAlgorithm
is used.
Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.
\nIn additon to a workflow to execute when a file is uploaded completely, WorkflowDeatails
can also contain a\n workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when a file is open when\n the session disconnects.
Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.
\nIn addition to a workflow to execute when a file is uploaded completely, WorkflowDetails
can also contain a\n workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when a file is open when\n the session disconnects.
The public portion of the Secure Shell (SSH) key used to authenticate the user to the\n server.
\nTransfer Family accepts RSA, ECDSA, and ED25519 keys.
" + "smithy.api#documentation": "The public portion of the Secure Shell (SSH) key used to authenticate the user to the\n server.
\nThe three standard SSH public key format elements are
,\n , and an optional
, with spaces\n between each element.
Transfer Family accepts RSA, ECDSA, and ED25519 keys.
\nFor RSA keys, the key type is ssh-rsa
.
For ED25519 keys, the key type is ssh-ed25519
.
For ECDSA keys, the key type is either ecdsa-sha2-nistp256
,\n ecdsa-sha2-nistp384
, or ecdsa-sha2-nistp521
, depending on the\n size of the key you generated.
Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.
\nIn additon to a workflow to execute when a file is uploaded completely, WorkflowDeatails
can also contain a\n workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when a file is open when\n the session disconnects.
Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.
\nIn addition to a workflow to execute when a file is uploaded completely, WorkflowDetails
can also contain a\n workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when a file is open when\n the session disconnects.
Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.
\nIn additon to a workflow to execute when a file is uploaded completely, WorkflowDeatails
can also contain a\n workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when a file is open when\n the session disconnects.
To remove an associated workflow from a server, you can provide an empty OnUpload
object, as in the following example.
\n aws transfer update-server --server-id s-01234567890abcdef --workflow-details '{\"OnUpload\":[]}'
\n
Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.
\nIn addition to a workflow to execute when a file is uploaded completely, WorkflowDetails
can also contain a\n workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when a file is open when\n the session disconnects.
To remove an associated workflow from a server, you can provide an empty OnUpload
object, as in the following example.
\n aws transfer update-server --server-id s-01234567890abcdef --workflow-details '{\"OnUpload\":[]}'
\n
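For illustration, the same removal expressed as a rough AWS SDK for Kotlin sketch; the region is a placeholder and the server ID is copied from the CLI example above.

import aws.sdk.kotlin.services.transfer.TransferClient
import aws.sdk.kotlin.services.transfer.model.UpdateServerRequest
import aws.sdk.kotlin.services.transfer.model.WorkflowDetails

suspend fun detachUploadWorkflows() {
    val request = UpdateServerRequest {
        serverId = "s-01234567890abcdef"
        // An empty OnUpload list removes any workflow currently attached to the server.
        workflowDetails = WorkflowDetails { onUpload = emptyList() }
    }
    TransferClient.fromEnvironment { region = "us-west-2" }.use { client ->
        client.updateServer(request)
    }
}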
Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.
\nIn additon to a workflow to execute when a file is uploaded completely, WorkflowDeatails
can also contain a\n workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when a file is open when\n the session disconnects.
Specifies the workflow ID for the workflow to assign and the execution role that's used for executing the workflow.
\nIn addition to a workflow to execute when a file is uploaded completely, WorkflowDetails
can also contain a\n workflow ID (and execution role) for a workflow to execute on partial upload. A partial upload occurs when a file is open when\n the session disconnects.
Details for a step that creates one or more tags.
\nYou specify one or more tags: each tag contains a key/value pair.
" } - }, - "DecryptStepDetails": { - "target": "com.amazonaws.transfer#DecryptStepDetails" } }, "traits": { @@ -9516,12 +9500,6 @@ "traits": { "smithy.api#enumValue": "DELETE" } - }, - "DECRYPT": { - "target": "smithy.api#Unit", - "traits": { - "smithy.api#enumValue": "DECRYPT" - } } } }, diff --git a/codegen/sdk/aws-models/translate.json b/codegen/sdk/aws-models/translate.json index 991fedae091..340e472fedf 100644 --- a/codegen/sdk/aws-models/translate.json +++ b/codegen/sdk/aws-models/translate.json @@ -100,14 +100,14 @@ "name": "translate" }, "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "Provides translation of the input content from the source language to the target language.
", + "smithy.api#documentation": "Provides language translation for input text in the source language to the specified target language.
", "smithy.api#title": "Amazon Translate", "smithy.rules#endpointRuleSet": { "version": "1.0", "parameters": { "Region": { "builtIn": "AWS::Region", - "required": true, + "required": false, "documentation": "The AWS region used to dispatch the request.", "type": "String" }, @@ -156,6 +156,15 @@ "ref": "Endpoint" } ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" } ], "type": "tree", @@ -418,9 +427,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-south-1", "UseDualStack": true, - "Region": "ap-south-1" + "UseFIPS": true } }, { @@ -431,9 +440,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-south-1", "UseDualStack": false, - "Region": "ap-south-1" + "UseFIPS": true } }, { @@ -444,9 +453,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-south-1", "UseDualStack": true, - "Region": "ap-south-1" + "UseFIPS": false } }, { @@ -457,9 +466,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-south-1", "UseDualStack": false, - "Region": "ap-south-1" + "UseFIPS": false } }, { @@ -470,9 +479,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ca-central-1", "UseDualStack": true, - "Region": "ca-central-1" + "UseFIPS": true } }, { @@ -483,9 +492,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ca-central-1", "UseDualStack": false, - "Region": "ca-central-1" + "UseFIPS": true } }, { @@ -496,9 +505,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ca-central-1", "UseDualStack": true, - "Region": "ca-central-1" + "UseFIPS": false } }, { @@ -509,9 +518,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ca-central-1", "UseDualStack": false, - "Region": "ca-central-1" + "UseFIPS": false } }, { @@ -522,9 +531,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-central-1", "UseDualStack": true, - "Region": "eu-central-1" + "UseFIPS": true } }, { @@ -535,9 +544,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-central-1", "UseDualStack": false, - "Region": "eu-central-1" + "UseFIPS": true } }, { @@ -548,9 +557,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-central-1", "UseDualStack": true, - "Region": "eu-central-1" + "UseFIPS": false } }, { @@ -561,9 +570,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-central-1", "UseDualStack": false, - "Region": "eu-central-1" + "UseFIPS": false } }, { @@ -574,9 +583,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-west-1", "UseDualStack": true, - "Region": "us-west-1" + "UseFIPS": true } }, { @@ -587,9 +596,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-west-1", "UseDualStack": false, - "Region": "us-west-1" + "UseFIPS": true } }, { @@ -600,9 +609,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-west-1", "UseDualStack": true, - "Region": "us-west-1" + "UseFIPS": false } }, { @@ -613,9 +622,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-west-1", "UseDualStack": false, - "Region": "us-west-1" + "UseFIPS": false } }, { @@ -626,9 +635,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-west-2", "UseDualStack": true, - "Region": "us-west-2" + "UseFIPS": true } }, { @@ -639,9 +648,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-west-2", "UseDualStack": false, - "Region": "us-west-2" + "UseFIPS": true } }, { @@ -652,9 +661,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-west-2", "UseDualStack": true, - "Region": "us-west-2" + "UseFIPS": false } }, { @@ -665,9 +674,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-west-2", "UseDualStack": false, - "Region": "us-west-2" + "UseFIPS": false } }, 
{ @@ -678,9 +687,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-north-1", "UseDualStack": true, - "Region": "eu-north-1" + "UseFIPS": true } }, { @@ -691,9 +700,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-north-1", "UseDualStack": false, - "Region": "eu-north-1" + "UseFIPS": true } }, { @@ -704,9 +713,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-north-1", "UseDualStack": true, - "Region": "eu-north-1" + "UseFIPS": false } }, { @@ -717,9 +726,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-north-1", "UseDualStack": false, - "Region": "eu-north-1" + "UseFIPS": false } }, { @@ -730,9 +739,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-west-3", "UseDualStack": true, - "Region": "eu-west-3" + "UseFIPS": true } }, { @@ -743,9 +752,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-west-3", "UseDualStack": false, - "Region": "eu-west-3" + "UseFIPS": true } }, { @@ -756,9 +765,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-west-3", "UseDualStack": true, - "Region": "eu-west-3" + "UseFIPS": false } }, { @@ -769,9 +778,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-west-3", "UseDualStack": false, - "Region": "eu-west-3" + "UseFIPS": false } }, { @@ -782,9 +791,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-west-2", "UseDualStack": true, - "Region": "eu-west-2" + "UseFIPS": true } }, { @@ -795,9 +804,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-west-2", "UseDualStack": false, - "Region": "eu-west-2" + "UseFIPS": true } }, { @@ -808,9 +817,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-west-2", "UseDualStack": true, - "Region": "eu-west-2" + "UseFIPS": false } }, { @@ -821,9 +830,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-west-2", "UseDualStack": false, - "Region": "eu-west-2" + "UseFIPS": false } }, { @@ -834,9 +843,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-west-1", "UseDualStack": true, - "Region": "eu-west-1" + "UseFIPS": true } }, { @@ -847,9 +856,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "eu-west-1", "UseDualStack": false, - "Region": "eu-west-1" + "UseFIPS": true } }, { @@ -860,9 +869,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-west-1", "UseDualStack": true, - "Region": "eu-west-1" + "UseFIPS": false } }, { @@ -873,9 +882,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "eu-west-1", "UseDualStack": false, - "Region": "eu-west-1" + "UseFIPS": false } }, { @@ -886,9 +895,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-northeast-2", "UseDualStack": true, - "Region": "ap-northeast-2" + "UseFIPS": true } }, { @@ -899,9 +908,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-northeast-2", "UseDualStack": false, - "Region": "ap-northeast-2" + "UseFIPS": true } }, { @@ -912,9 +921,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-northeast-2", "UseDualStack": true, - "Region": "ap-northeast-2" + "UseFIPS": false } }, { @@ -925,9 +934,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-northeast-2", "UseDualStack": false, - "Region": "ap-northeast-2" + "UseFIPS": false } }, { @@ -938,9 +947,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-northeast-1", "UseDualStack": true, - "Region": "ap-northeast-1" + "UseFIPS": true } }, { @@ -951,9 +960,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-northeast-1", "UseDualStack": false, - "Region": "ap-northeast-1" + "UseFIPS": true } }, { @@ -964,9 +973,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-northeast-1", "UseDualStack": true, 
- "Region": "ap-northeast-1" + "UseFIPS": false } }, { @@ -977,9 +986,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-northeast-1", "UseDualStack": false, - "Region": "ap-northeast-1" + "UseFIPS": false } }, { @@ -990,9 +999,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-east-1", "UseDualStack": true, - "Region": "ap-east-1" + "UseFIPS": true } }, { @@ -1003,9 +1012,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-east-1", "UseDualStack": false, - "Region": "ap-east-1" + "UseFIPS": true } }, { @@ -1016,9 +1025,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-east-1", "UseDualStack": true, - "Region": "ap-east-1" + "UseFIPS": false } }, { @@ -1029,9 +1038,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-east-1", "UseDualStack": false, - "Region": "ap-east-1" + "UseFIPS": false } }, { @@ -1042,9 +1051,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-gov-west-1", "UseDualStack": true, - "Region": "us-gov-west-1" + "UseFIPS": true } }, { @@ -1055,9 +1064,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-gov-west-1", "UseDualStack": false, - "Region": "us-gov-west-1" + "UseFIPS": true } }, { @@ -1068,9 +1077,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-gov-west-1", "UseDualStack": true, - "Region": "us-gov-west-1" + "UseFIPS": false } }, { @@ -1081,9 +1090,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-gov-west-1", "UseDualStack": false, - "Region": "us-gov-west-1" + "UseFIPS": false } }, { @@ -1094,9 +1103,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-southeast-1", "UseDualStack": true, - "Region": "ap-southeast-1" + "UseFIPS": true } }, { @@ -1107,9 +1116,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-southeast-1", "UseDualStack": false, - "Region": "ap-southeast-1" + "UseFIPS": true } }, { @@ -1120,9 +1129,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-southeast-1", "UseDualStack": true, - "Region": "ap-southeast-1" + "UseFIPS": false } }, { @@ -1133,9 +1142,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-southeast-1", "UseDualStack": false, - "Region": "ap-southeast-1" + "UseFIPS": false } }, { @@ -1146,9 +1155,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-southeast-2", "UseDualStack": true, - "Region": "ap-southeast-2" + "UseFIPS": true } }, { @@ -1159,9 +1168,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "ap-southeast-2", "UseDualStack": false, - "Region": "ap-southeast-2" + "UseFIPS": true } }, { @@ -1172,9 +1181,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-southeast-2", "UseDualStack": true, - "Region": "ap-southeast-2" + "UseFIPS": false } }, { @@ -1185,9 +1194,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "ap-southeast-2", "UseDualStack": false, - "Region": "ap-southeast-2" + "UseFIPS": false } }, { @@ -1196,9 +1205,9 @@ "error": "FIPS and DualStack are enabled, but this partition does not support one or both" }, "params": { - "UseFIPS": true, + "Region": "us-iso-east-1", "UseDualStack": true, - "Region": "us-iso-east-1" + "UseFIPS": true } }, { @@ -1209,9 +1218,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-iso-east-1", "UseDualStack": false, - "Region": "us-iso-east-1" + "UseFIPS": true } }, { @@ -1220,9 +1229,9 @@ "error": "DualStack is enabled but this partition does not support DualStack" }, "params": { - "UseFIPS": false, + "Region": "us-iso-east-1", "UseDualStack": true, - "Region": "us-iso-east-1" + "UseFIPS": false } }, { @@ -1233,9 +1242,9 @@ } }, "params": { - "UseFIPS": false, + "Region": 
"us-iso-east-1", "UseDualStack": false, - "Region": "us-iso-east-1" + "UseFIPS": false } }, { @@ -1246,9 +1255,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-east-1", "UseDualStack": true, - "Region": "us-east-1" + "UseFIPS": true } }, { @@ -1259,9 +1268,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-east-1", "UseDualStack": false, - "Region": "us-east-1" + "UseFIPS": true } }, { @@ -1272,9 +1281,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-east-1", "UseDualStack": true, - "Region": "us-east-1" + "UseFIPS": false } }, { @@ -1285,9 +1294,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-east-1", "UseDualStack": false, - "Region": "us-east-1" + "UseFIPS": false } }, { @@ -1298,9 +1307,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-east-2", "UseDualStack": true, - "Region": "us-east-2" + "UseFIPS": true } }, { @@ -1311,9 +1320,9 @@ } }, "params": { - "UseFIPS": true, + "Region": "us-east-2", "UseDualStack": false, - "Region": "us-east-2" + "UseFIPS": true } }, { @@ -1324,9 +1333,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-east-2", "UseDualStack": true, - "Region": "us-east-2" + "UseFIPS": false } }, { @@ -1337,9 +1346,9 @@ } }, "params": { - "UseFIPS": false, + "Region": "us-east-2", "UseDualStack": false, - "Region": "us-east-2" + "UseFIPS": false } }, { @@ -1350,9 +1359,9 @@ } }, "params": { - "UseFIPS": false, - "UseDualStack": false, "Region": "us-east-1", + "UseDualStack": false, + "UseFIPS": false, "Endpoint": "https://example.com" } }, @@ -1362,9 +1371,9 @@ "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "UseFIPS": true, - "UseDualStack": false, "Region": "us-east-1", + "UseDualStack": false, + "UseFIPS": true, "Endpoint": "https://example.com" } }, @@ -1374,9 +1383,9 @@ "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "UseFIPS": false, - "UseDualStack": true, "Region": "us-east-1", + "UseDualStack": true, + "UseFIPS": false, "Endpoint": "https://example.com" } } @@ -1416,9 +1425,9 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 10000 + "max": 5000 }, - "smithy.api#pattern": "^[\\P{M}\\p{M}]{1,10000}$" + "smithy.api#pattern": "^[\\P{M}\\p{M}]{1,5000}$" } }, "com.amazonaws.translate#ClientTokenString": { @@ -2117,7 +2126,7 @@ "S3Uri": { "target": "com.amazonaws.translate#S3Uri", "traits": { - "smithy.api#documentation": "The URI of the AWS S3 folder that contains the input files. Amazon Translate translates all the\n files in the folder and all its sub-folders. The folder must be in the same Region as the API endpoint you are\n calling.
", + "smithy.api#documentation": "The URI of the AWS S3 folder that contains the input files. Amazon Translate translates all the\n files in the folder. The folder must be in the same Region as the API endpoint you are\n calling.
\nThe URI can also point to a single input document, or it can provide the prefix for a collection of\n input documents. For example, if you use the URI S3://bucketName/prefix
and the\n prefix is a single file, Amazon Translate uses that file as input. If more than one file begins with the\n prefix, Amazon Translate uses all of them as input.
Lists all tags associated with a given Amazon Translate resource.\n For more information, see \n Tagging your resources.
" + "smithy.api#documentation": "Lists all tags associated with a given Amazon Translate resource. \n For more information, see \n Tagging your resources.
" } }, "com.amazonaws.translate#ListTagsForResourceRequest": { @@ -3040,7 +3049,7 @@ } ], "traits": { - "smithy.api#documentation": "Starts an asynchronous batch translation job. Use batch translation jobs to\n translate large volumes of text across multiple documents at once.\n For batch translation, you can input documents with different source languages (specify auto
\n as the source language). You can specify one\n or more target languages. Batch translation translates each input document into each of the target languages.\n For more information, see\n Asynchronous batch processing.
Batch translation jobs can be described with the DescribeTextTranslationJob operation, listed with the ListTextTranslationJobs operation, and stopped with the StopTextTranslationJob operation.
" + "smithy.api#documentation": "Starts an asynchronous batch translation job. Use batch translation jobs to\n translate large volumes of text across multiple documents at once. \n For batch translation, the input documents must share the same source language. You can specify one \n or more target languages. Batch translation translates each input document into each of the target languages.\n For more information, see\n Asynchronous batch processing \n
\n\nBatch translation jobs can be described with the DescribeTextTranslationJob operation, listed with the ListTextTranslationJobs operation, and stopped with the StopTextTranslationJob operation.
\nAmazon Translate does not support batch translation of multiple source languages at once.
\nThe Amazon Resource Name (ARN) of an AWS Identity Access and Management (IAM) role\n that grants Amazon Translate read access to your input data. For more information, see\n Identity and access management .
", + "smithy.api#documentation": "The Amazon Resource Name (ARN) of an AWS Identity Access and Management (IAM) role\n that grants Amazon Translate read access to your input data. For more information, see \n Identity and access management .
", "smithy.api#required": {} } }, "SourceLanguageCode": { "target": "com.amazonaws.translate#LanguageCodeString", "traits": { - "smithy.api#documentation": "The language code of the input language. Specify the language if all input documents share the same language.\n If you don't know the language of the source files, or your input documents contains different source\n languages, select auto
. Amazon Translate auto detects the source language for each input document.\n For a list of supported language codes, see\n Supported languages.
The language code of the input language. For a list of language codes, see \n Supported languages.
\nAmazon Translate does not automatically detect a source language during batch translation jobs.
", "smithy.api#required": {} } }, "TargetLanguageCodes": { "target": "com.amazonaws.translate#TargetLanguageCodeStringList", "traits": { - "smithy.api#documentation": "The target languages of the translation job. Enter up to 10 language codes.\n Each input file is translated into each target language.
\nEach language code is 2 or 5 characters long. For a list of language codes, see Supported languages.
", + "smithy.api#documentation": "The target languages of the translation job. Enter up to 10 language codes. \n Each input file is translated into each target language.
\nEach language code is two or five characters long. For a list of language codes, see \n Supported languages.
", "smithy.api#required": {} } }, @@ -3096,7 +3105,7 @@ "ParallelDataNames": { "target": "com.amazonaws.translate#ResourceNameList", "traits": { - "smithy.api#documentation": "The name of a parallel data resource to add to the translation job. This resource consists\n of examples that show how you want segments of text to be translated.\n If you specify multiple target languages for\n the job, the parallel data file must include translations for\n all the target languages.
\nWhen you add parallel\n data to a translation job, you create an Active Custom Translation job.
\nThis parameter accepts only one parallel data resource.
\nActive Custom Translation jobs are priced at a higher rate than other jobs that don't\n use parallel data. For more information, see Amazon Translate pricing.
\nFor a list of available parallel data resources, use the ListParallelData operation.
\nFor more information, see \n Customizing your translations with parallel data.
" + "smithy.api#documentation": "The name of a parallel data resource to add to the translation job. This resource consists\n of examples that show how you want segments of text to be translated. \n If you specify multiple target languages for\n the job, the parallel data file must include translations for\n all the target languages.
\nWhen you add parallel\n data to a translation job, you create an Active Custom Translation job.
\nThis parameter accepts only one parallel data resource.
\nActive Custom Translation jobs are priced at a higher rate than other jobs that don't\n use parallel data. For more information, see Amazon Translate pricing.
\nFor a list of available parallel data resources, use the ListParallelData operation.
\nFor more information, see \n Customizing your translations with parallel data.
" } }, "ClientToken": { @@ -3275,7 +3284,7 @@ } ], "traits": { - "smithy.api#documentation": "Associates a specific tag with a resource. A tag is a key-value pair\n that adds as a metadata to a resource.\n For more information, see \n Tagging your resources.
" + "smithy.api#documentation": "Associates a specific tag with a resource. A tag is a key-value pair\n that adds as a metadata to a resource. \n For more information, see \n Tagging your resources.
" } }, "com.amazonaws.translate#TagResourceRequest": { @@ -3381,7 +3390,7 @@ } }, "traits": { - "smithy.api#documentation": "The data associated with the custom terminology. For information about the custom terminology file, see\n \n Creating a Custom Terminology.
" + "smithy.api#documentation": "The data associated with the custom terminology. For information about the custom terminology file, see \n \n Creating a Custom Terminology.
" } }, "com.amazonaws.translate#TerminologyDataFormat": { @@ -3762,7 +3771,7 @@ "Text": { "target": "com.amazonaws.translate#BoundedLengthString", "traits": { - "smithy.api#documentation": "The text to translate. The text string can be a maximum of 10,000 bytes long. Depending on\n your character set, this may be fewer than 10,000 characters.
", + "smithy.api#documentation": "The text to translate. The text string can be a maximum of 5,000 bytes long. Depending on\n your character set, this may be fewer than 5,000 characters.
", "smithy.api#required": {} } }, @@ -3798,7 +3807,7 @@ "type": "structure", "members": { "TranslatedText": { - "target": "com.amazonaws.translate#TranslatedTextString", + "target": "com.amazonaws.translate#String", "traits": { "smithy.api#documentation": "The translated text.
", "smithy.api#required": {} @@ -3832,34 +3841,24 @@ } } }, - "com.amazonaws.translate#TranslatedTextString": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 0, - "max": 20000 - }, - "smithy.api#pattern": "^[\\P{M}\\p{M}]{0,20000}$" - } - }, "com.amazonaws.translate#TranslationSettings": { "type": "structure", "members": { "Formality": { "target": "com.amazonaws.translate#Formality", "traits": { - "smithy.api#documentation": "You can optionally specify the desired level of formality for translations\n to supported target languages. The formality\n setting controls the level of formal language usage (also known as register) in the\n translation output. You can set the value to informal or formal. If you don't specify a value for\n formality, or if the target language doesn't support formality, the translation will\n ignore the formality setting.
\nIf you specify multiple target languages for the job, translate ignores\n the formality setting for any unsupported target language.
\nFor a list of target languages that support formality, see Supported languages\n in the Amazon Translate Developer Guide.
" + "smithy.api#documentation": "You can optionally specify the desired level of formality for translations\n to supported target languages. The formality\n setting controls the level of formal language usage (also known as register) in the\n translation output. You can set the value to informal or formal. If you don't specify a value for\n formality, or if the target language doesn't support formality, the translation will\n ignore the formality setting.
\nIf you specify multiple target languages for the job, translate ignores\n the formality setting for any unsupported target language.
\nFor a list of target languages that support formality, see Setting Formality\n in the Amazon Translate Developer Guide.
" } }, "Profanity": { "target": "com.amazonaws.translate#Profanity", "traits": { - "smithy.api#documentation": "Enable the profanity setting if you want Amazon Translate to mask profane words and\n phrases in your translation output.
\nTo mask profane words and phrases, Amazon Translate replaces them with the grawlix string\n “?$#@$“. This 5-character sequence is used for each profane word or phrase, regardless of the\n length or number of words.
\nAmazon Translate doesn't detect profanity in all of its supported languages. For languages\n that don't support profanity detection, see Unsupported languages in the Amazon Translate Developer Guide.
\nIf you specify multiple target languages for the job, all the\n target languages must support profanity masking. If any of the\n target languages don't support profanity masking, the\n translation job won't mask profanity for any target\n language.
" + "smithy.api#documentation": "Enable the profanity setting if you want Amazon Translate to mask profane words and\n phrases in your translation output.
\nTo mask profane words and phrases, Amazon Translate replaces them with the grawlix string\n “?$#@$“. This 5-character sequence is used for each profane word or phrase, regardless of the\n length or number of words.
\nAmazon Translate doesn't detect profanity in all of its supported languages. For languages\n that support profanity detection, see Masking\n profanity in the Amazon Translate Developer Guide.
\nIf you specify multiple target languages for the job, all the\n target languages must support profanity masking. If any of the\n target languages don't support profanity masking, the\n translation job won't mask profanity for any target\n language.
" } } }, "traits": { - "smithy.api#documentation": "Optional settings that configure the translation output. Use these settings for\n real time translations and asynchronous translation jobs.
" + "smithy.api#documentation": "Optional settings that configure the translation output. Use these settings for \n real time translations and asynchronous translation jobs.
" } }, "com.amazonaws.translate#UnboundedLengthString": { @@ -3932,7 +3931,7 @@ } ], "traits": { - "smithy.api#documentation": "Removes a specific tag associated with an Amazon Translate resource.\n For more information, see \n Tagging your resources.
" + "smithy.api#documentation": "Removes a specific tag associated with an Amazon Translate resource. \n For more information, see \n Tagging your resources.
" } }, "com.amazonaws.translate#UntagResourceRequest": { diff --git a/gradle.properties b/gradle.properties index 87ee3bc7ddf..00afb056280 100644 --- a/gradle.properties +++ b/gradle.properties @@ -13,7 +13,7 @@ sdkVersion=0.19.3-SNAPSHOT smithyVersion=1.26.1 smithyGradleVersion=0.6.0 # smithy-kotlin codegen and runtime are versioned together -smithyKotlinVersion=0.14.3-SNAPSHOT +smithyKotlinVersion=0.14.3 # kotlin kotlinVersion=1.7.10