From 5aa876e9c8a7eb2855c52838d15011647bc10fbc Mon Sep 17 00:00:00 2001 From: David Miller <45697098+dlm6693@users.noreply.github.com> Date: Wed, 28 Sep 2022 17:40:54 -0400 Subject: [PATCH 1/4] updated vanilla test files and rename provider test file (#2767) --- .../is-virtual-hostable-s3-bucket.json | 29 ++++++++++++++++++- .../endpoints/test-cases/valid-hostlabel.json | 9 ++++++ ...points_v2.py => test_endpoint_provider.py} | 0 3 files changed, 37 insertions(+), 1 deletion(-) rename tests/unit/{test_endpoints_v2.py => test_endpoint_provider.py} (100%) diff --git a/tests/unit/data/endpoints/test-cases/is-virtual-hostable-s3-bucket.json b/tests/unit/data/endpoints/test-cases/is-virtual-hostable-s3-bucket.json index 9641304ace..f67cc6147b 100644 --- a/tests/unit/data/endpoints/test-cases/is-virtual-hostable-s3-bucket.json +++ b/tests/unit/data/endpoints/test-cases/is-virtual-hostable-s3-bucket.json @@ -116,6 +116,33 @@ "expect": { "error": "not isVirtualHostableS3Bucket" } + }, + { + "documentation": "192.168.5.4: not isVirtualHostable (formatted like an ip address)", + "params": { + "BucketName": "192.168.5.4" + }, + "expect": { + "error": "not isVirtualHostableS3Bucket" + } + }, + { + "documentation": "bucket-.name: not isVirtualHostable (invalid label, ends with a -)", + "params": { + "BucketName": "bucket-.name" + }, + "expect": { + "error": "not isVirtualHostableS3Bucket" + } + }, + { + "documentation": "bucket.-name: not isVirtualHostable (invalid label, starts with a -)", + "params": { + "BucketName": "bucket.-name" + }, + "expect": { + "error": "not isVirtualHostableS3Bucket" + } } ] -} \ No newline at end of file +} diff --git a/tests/unit/data/endpoints/test-cases/valid-hostlabel.json b/tests/unit/data/endpoints/test-cases/valid-hostlabel.json index 70f148d4b1..82287b727b 100644 --- a/tests/unit/data/endpoints/test-cases/valid-hostlabel.json +++ b/tests/unit/data/endpoints/test-cases/valid-hostlabel.json @@ -42,6 +42,15 @@ "expect": { "error": "Invalid hostlabel" } + }, + { + "documentation": "an empty string is not a valid hostlabel", + "params": { + "Region": "" + }, + "expect": { + "error": "Invalid hostlabel" + } } ] } diff --git a/tests/unit/test_endpoints_v2.py b/tests/unit/test_endpoint_provider.py similarity index 100% rename from tests/unit/test_endpoints_v2.py rename to tests/unit/test_endpoint_provider.py From a41eae0fa95ab6c0d80b51c1f495dca1241fa2c9 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Thu, 29 Sep 2022 18:09:37 +0000 Subject: [PATCH 2/4] Update to latest models --- .../next-release/api-change-acm-11031.json | 5 + .../next-release/api-change-ec2-28737.json | 5 + .../api-change-emrserverless-31104.json | 5 + .../next-release/api-change-fsx-23319.json | 5 + ...change-migrationhuborchestrator-50518.json | 5 + .../next-release/api-change-polly-705.json | 5 + .../next-release/api-change-proton-80007.json | 5 + .../api-change-sagemaker-88536.json | 5 + .../api-change-secretsmanager-71738.json | 5 + .../api-change-translate-83017.json | 5 + .../api-change-workspaces-80407.json | 5 + botocore/data/acm/2015-12-08/service-2.json | 123 +- botocore/data/ec2/2016-11-15/service-2.json | 3 +- .../emr-serverless/2021-07-13/service-2.json | 56 +- botocore/data/fsx/2018-03-01/service-2.json | 667 +++- .../2021-08-28/paginators-1.json | 46 + .../2021-08-28/service-2.json | 2910 +++++++++++++++++ .../2021-08-28/waiters-2.json | 5 + botocore/data/polly/2016-06-10/service-2.json | 6 +- .../data/proton/2020-07-20/service-2.json | 206 +- 
.../data/sagemaker/2017-07-24/service-2.json | 70 +- .../secretsmanager/2017-10-17/service-2.json | 44 +- .../data/translate/2017-07-01/service-2.json | 147 +- .../data/workspaces/2015-04-08/service-2.json | 13 +- 24 files changed, 4159 insertions(+), 192 deletions(-) create mode 100644 .changes/next-release/api-change-acm-11031.json create mode 100644 .changes/next-release/api-change-ec2-28737.json create mode 100644 .changes/next-release/api-change-emrserverless-31104.json create mode 100644 .changes/next-release/api-change-fsx-23319.json create mode 100644 .changes/next-release/api-change-migrationhuborchestrator-50518.json create mode 100644 .changes/next-release/api-change-polly-705.json create mode 100644 .changes/next-release/api-change-proton-80007.json create mode 100644 .changes/next-release/api-change-sagemaker-88536.json create mode 100644 .changes/next-release/api-change-secretsmanager-71738.json create mode 100644 .changes/next-release/api-change-translate-83017.json create mode 100644 .changes/next-release/api-change-workspaces-80407.json create mode 100644 botocore/data/migrationhuborchestrator/2021-08-28/paginators-1.json create mode 100644 botocore/data/migrationhuborchestrator/2021-08-28/service-2.json create mode 100644 botocore/data/migrationhuborchestrator/2021-08-28/waiters-2.json diff --git a/.changes/next-release/api-change-acm-11031.json b/.changes/next-release/api-change-acm-11031.json new file mode 100644 index 0000000000..29206055c5 --- /dev/null +++ b/.changes/next-release/api-change-acm-11031.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``acm``", + "description": "This update returns additional certificate details such as certificate SANs and allows sorting in the ListCertificates API." +} diff --git a/.changes/next-release/api-change-ec2-28737.json b/.changes/next-release/api-change-ec2-28737.json new file mode 100644 index 0000000000..47eb1b7af5 --- /dev/null +++ b/.changes/next-release/api-change-ec2-28737.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``ec2``", + "description": "u-3tb1 instances are powered by Intel Xeon Platinum 8176M (Skylake) processors and are purpose-built to run large in-memory databases." +} diff --git a/.changes/next-release/api-change-emrserverless-31104.json b/.changes/next-release/api-change-emrserverless-31104.json new file mode 100644 index 0000000000..05b6d456f9 --- /dev/null +++ b/.changes/next-release/api-change-emrserverless-31104.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``emr-serverless``", + "description": "This release adds API support to debug Amazon EMR Serverless jobs in real-time with live application UIs" +} diff --git a/.changes/next-release/api-change-fsx-23319.json b/.changes/next-release/api-change-fsx-23319.json new file mode 100644 index 0000000000..73938698c6 --- /dev/null +++ b/.changes/next-release/api-change-fsx-23319.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``fsx``", + "description": "This release adds support for Amazon File Cache." +} diff --git a/.changes/next-release/api-change-migrationhuborchestrator-50518.json b/.changes/next-release/api-change-migrationhuborchestrator-50518.json new file mode 100644 index 0000000000..b136e29e9d --- /dev/null +++ b/.changes/next-release/api-change-migrationhuborchestrator-50518.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``migrationhuborchestrator``", + "description": "Introducing AWS MigrationHubOrchestrator. 
This is the first public release of AWS MigrationHubOrchestrator." +} diff --git a/.changes/next-release/api-change-polly-705.json b/.changes/next-release/api-change-polly-705.json new file mode 100644 index 0000000000..5089e12e94 --- /dev/null +++ b/.changes/next-release/api-change-polly-705.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``polly``", + "description": "Added support for the new Cantonese voice - Hiujin. Hiujin is available as a Neural voice only." +} diff --git a/.changes/next-release/api-change-proton-80007.json b/.changes/next-release/api-change-proton-80007.json new file mode 100644 index 0000000000..f477804297 --- /dev/null +++ b/.changes/next-release/api-change-proton-80007.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``proton``", + "description": "This release adds an option to delete pipeline provisioning repositories using the UpdateAccountSettings API" +} diff --git a/.changes/next-release/api-change-sagemaker-88536.json b/.changes/next-release/api-change-sagemaker-88536.json new file mode 100644 index 0000000000..093a8819c3 --- /dev/null +++ b/.changes/next-release/api-change-sagemaker-88536.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``sagemaker``", + "description": "SageMaker Training Managed Warm Pools let you retain provisioned infrastructure to reduce latency for repetitive training workloads." +} diff --git a/.changes/next-release/api-change-secretsmanager-71738.json b/.changes/next-release/api-change-secretsmanager-71738.json new file mode 100644 index 0000000000..4cfcfab432 --- /dev/null +++ b/.changes/next-release/api-change-secretsmanager-71738.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``secretsmanager``", + "description": "Documentation updates for Secrets Manager" +} diff --git a/.changes/next-release/api-change-translate-83017.json b/.changes/next-release/api-change-translate-83017.json new file mode 100644 index 0000000000..3dac689271 --- /dev/null +++ b/.changes/next-release/api-change-translate-83017.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``translate``", + "description": "This release enables customers to access control rights on Translate resources like Parallel Data and Custom Terminology using Tag Based Authorization." +} diff --git a/.changes/next-release/api-change-workspaces-80407.json b/.changes/next-release/api-change-workspaces-80407.json new file mode 100644 index 0000000000..e8c7d41499 --- /dev/null +++ b/.changes/next-release/api-change-workspaces-80407.json @@ -0,0 +1,5 @@ +{ + "type": "api-change", + "category": "``workspaces``", + "description": "This release includes diagnostic log uploading feature. If it is enabled, the log files of WorkSpaces Windows client will be sent to Amazon WorkSpaces automatically for troubleshooting. You can use modifyClientProperty api to enable/disable this feature." +} diff --git a/botocore/data/acm/2015-12-08/service-2.json b/botocore/data/acm/2015-12-08/service-2.json index 5d964fe9d8..103e916066 100644 --- a/botocore/data/acm/2015-12-08/service-2.json +++ b/botocore/data/acm/2015-12-08/service-2.json @@ -57,7 +57,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArnException"} ], - "documentation":"

Returns detailed metadata about the specified ACM certificate.

" + "documentation":"

Returns detailed metadata about the specified ACM certificate.

If you have just created a certificate using the RequestCertificate action, there is a delay of several seconds before you can retrieve information about it.

" }, "ExportCertificate":{ "name":"ExportCertificate", @@ -119,7 +119,7 @@ {"shape":"InvalidParameterException"}, {"shape":"InvalidArnException"} ], - "documentation":"

Imports a certificate into Amazon Web Services Certificate Manager (ACM) to use with services that are integrated with ACM. Note that integrated services allow only certificate types and keys they support to be associated with their resources. Further, their support differs depending on whether the certificate is imported into IAM or into ACM. For more information, see the documentation for each service. For more information about importing certificates into ACM, see Importing Certificates in the Amazon Web Services Certificate Manager User Guide.

ACM does not provide managed renewal for certificates that you import.

Note the following guidelines when importing third party certificates:

This operation returns the Amazon Resource Name (ARN) of the imported certificate.

" + "documentation":"

Imports a certificate into Certificate Manager (ACM) to use with services that are integrated with ACM. Note that integrated services allow only certificate types and keys they support to be associated with their resources. Further, their support differs depending on whether the certificate is imported into IAM or into ACM. For more information, see the documentation for each service. For more information about importing certificates into ACM, see Importing Certificates in the Certificate Manager User Guide.

ACM does not provide managed renewal for certificates that you import.

Note the following guidelines when importing third party certificates:

This operation returns the Amazon Resource Name (ARN) of the imported certificate.

" }, "ListCertificates":{ "name":"ListCertificates", @@ -191,7 +191,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArnException"} ], - "documentation":"

Renews an eligible ACM certificate. At this time, only exported private certificates can be renewed with this operation. In order to renew your ACM PCA certificates with ACM, you must first grant the ACM service principal permission to do so. For more information, see Testing Managed Renewal in the ACM User Guide.

" + "documentation":"

Renews an eligible ACM certificate. At this time, only exported private certificates can be renewed with this operation. In order to renew your ACM Private CA certificates with ACM, you must first grant the ACM service principal permission to do so. For more information, see Testing Managed Renewal in the ACM User Guide.

" }, "RequestCertificate":{ "name":"RequestCertificate", @@ -210,7 +210,7 @@ {"shape":"TagPolicyException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Requests an ACM certificate for use with other Amazon Web Services services. To request an ACM certificate, you must specify a fully qualified domain name (FQDN) in the DomainName parameter. You can also specify additional FQDNs in the SubjectAlternativeNames parameter.

If you are requesting a private certificate, domain validation is not required. If you are requesting a public certificate, each domain name that you specify must be validated to verify that you own or control the domain. You can use DNS validation or email validation. We recommend that you use DNS validation. ACM issues public certificates after receiving approval from the domain owner.

ACM behavior differs from the RFC 6125 specification (https://tools.ietf.org/html/rfc6125#appendix-B.2) of the certificate validation process. first checks for a subject alternative name, and, if it finds one, ignores the common name (CN)

" + "documentation":"

Requests an ACM certificate for use with other Amazon Web Services services. To request an ACM certificate, you must specify a fully qualified domain name (FQDN) in the DomainName parameter. You can also specify additional FQDNs in the SubjectAlternativeNames parameter.

If you are requesting a private certificate, domain validation is not required. If you are requesting a public certificate, each domain name that you specify must be validated to verify that you own or control the domain. You can use DNS validation or email validation. We recommend that you use DNS validation. ACM issues public certificates after receiving approval from the domain owner.

ACM behavior differs from the RFC 6125 specification of the certificate validation process. ACM first checks for a Subject Alternative Name, and, if it finds one, ignores the common name (CN).

After successful completion of the RequestCertificate action, there is a delay of several seconds before you can retrieve information about the new certificate.

" }, "ResendValidationEmail":{ "name":"ResendValidationEmail", @@ -340,11 +340,11 @@ }, "ImportedAt":{ "shape":"TStamp", - "documentation":"

The date and time at which the certificate was imported. This value exists only when the certificate type is IMPORTED.

" + "documentation":"

The date and time when the certificate was imported. This value exists only when the certificate type is IMPORTED.

" }, "Status":{ "shape":"CertificateStatus", - "documentation":"

The status of the certificate.

" + "documentation":"

The status of the certificate.

A certificate enters status PENDING_VALIDATION upon being requested, unless it fails for any of the reasons given in the troubleshooting topic Certificate request fails. ACM makes repeated attempts to validate a certificate for 72 hours and then times out. If a certificate shows status FAILED or VALIDATION_TIMED_OUT, delete the request, correct the issue with DNS validation or Email validation, and try again. If validation succeeds, the certificate enters status ISSUED.

" }, "RevokedAt":{ "shape":"TStamp", @@ -376,11 +376,11 @@ }, "FailureReason":{ "shape":"FailureReason", - "documentation":"

The reason the certificate request failed. This value exists only when the certificate status is FAILED. For more information, see Certificate Request Failed in the Amazon Web Services Certificate Manager User Guide.

" + "documentation":"

The reason the certificate request failed. This value exists only when the certificate status is FAILED. For more information, see Certificate Request Failed in the Certificate Manager User Guide.

" }, "Type":{ "shape":"CertificateType", - "documentation":"

The source of the certificate. For certificates provided by ACM, this value is AMAZON_ISSUED. For certificates that you imported with ImportCertificate, this value is IMPORTED. ACM does not provide managed renewal for imported certificates. For more information about the differences between certificates that you import and those that ACM provides, see Importing Certificates in the Amazon Web Services Certificate Manager User Guide.

" + "documentation":"

The source of the certificate. For certificates provided by ACM, this value is AMAZON_ISSUED. For certificates that you imported with ImportCertificate, this value is IMPORTED. ACM does not provide managed renewal for imported certificates. For more information about the differences between certificates that you import and those that ACM provides, see Importing Certificates in the Certificate Manager User Guide.

" }, "RenewalSummary":{ "shape":"RenewalSummary", @@ -396,7 +396,7 @@ }, "CertificateAuthorityArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the ACM PCA private certificate authority (CA) that issued the certificate. This has the following format:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" + "documentation":"

The Amazon Resource Name (ARN) of the private certificate authority (CA) that issued the certificate. This has the following format:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" }, "RenewalEligibility":{ "shape":"RenewalEligibility", @@ -445,6 +445,73 @@ "DomainName":{ "shape":"DomainNameString", "documentation":"

Fully qualified domain name (FQDN), such as www.example.com or example.com, for the certificate.

" + }, + "SubjectAlternativeNameSummaries":{ + "shape":"DomainList", + "documentation":"

One or more domain names (subject alternative names) included in the certificate. This list contains the domain names that are bound to the public key that is contained in the certificate. The subject alternative names include the canonical domain name (CN) of the certificate and additional domain names that can be used to connect to the website.

When called by ListCertificates, this parameter will only return the first 100 subject alternative names included in the certificate. To display the full list of subject alternative names, use DescribeCertificate.

" + }, + "HasAdditionalSubjectAlternativeNames":{ + "shape":"NullableBoolean", + "documentation":"

When called by ListCertificates, indicates whether the full list of subject alternative names has been included in the response. If false, the response includes all of the subject alternative names included in the certificate. If true, the response only includes the first 100 subject alternative names included in the certificate. To display the full list of subject alternative names, use DescribeCertificate.

", + "box":true + }, + "Status":{ + "shape":"CertificateStatus", + "documentation":"

The status of the certificate.

A certificate enters status PENDING_VALIDATION upon being requested, unless it fails for any of the reasons given in the troubleshooting topic Certificate request fails. ACM makes repeated attempts to validate a certificate for 72 hours and then times out. If a certificate shows status FAILED or VALIDATION_TIMED_OUT, delete the request, correct the issue with DNS validation or Email validation, and try again. If validation succeeds, the certificate enters status ISSUED.

" + }, + "Type":{ + "shape":"CertificateType", + "documentation":"

The source of the certificate. For certificates provided by ACM, this value is AMAZON_ISSUED. For certificates that you imported with ImportCertificate, this value is IMPORTED. ACM does not provide managed renewal for imported certificates. For more information about the differences between certificates that you import and those that ACM provides, see Importing Certificates in the Certificate Manager User Guide.

" + }, + "KeyAlgorithm":{ + "shape":"KeyAlgorithm", + "documentation":"

The algorithm that was used to generate the public-private key pair.

" + }, + "KeyUsages":{ + "shape":"KeyUsageNames", + "documentation":"

A list of Key Usage X.509 v3 extension objects. Each object is a string value that identifies the purpose of the public key contained in the certificate. Possible extension values include DIGITAL_SIGNATURE, KEY_ENCIPHERMENT, NON_REPUDIATION, and more.

" + }, + "ExtendedKeyUsages":{ + "shape":"ExtendedKeyUsageNames", + "documentation":"

Contains a list of Extended Key Usage X.509 v3 extension objects. Each object specifies a purpose for which the certificate public key can be used and consists of a name and an object identifier (OID).

" + }, + "InUse":{ + "shape":"NullableBoolean", + "documentation":"

Indicates whether the certificate is currently in use by any Amazon Web Services resources.

", + "box":true + }, + "Exported":{ + "shape":"NullableBoolean", + "documentation":"

Indicates whether the certificate has been exported. This value exists only when the certificate type is PRIVATE.

", + "box":true + }, + "RenewalEligibility":{ + "shape":"RenewalEligibility", + "documentation":"

Specifies whether the certificate is eligible for renewal. At this time, only exported private certificates can be renewed with the RenewCertificate command.

" + }, + "NotBefore":{ + "shape":"TStamp", + "documentation":"

The time before which the certificate is not valid.

" + }, + "NotAfter":{ + "shape":"TStamp", + "documentation":"

The time after which the certificate is not valid.

" + }, + "CreatedAt":{ + "shape":"TStamp", + "documentation":"

The time at which the certificate was requested.

" + }, + "IssuedAt":{ + "shape":"TStamp", + "documentation":"

The time at which the certificate was issued. This value exists only when the certificate type is AMAZON_ISSUED.

" + }, + "ImportedAt":{ + "shape":"TStamp", + "documentation":"

The date and time when the certificate was imported. This value exists only when the certificate type is IMPORTED.

" + }, + "RevokedAt":{ + "shape":"TStamp", + "documentation":"

The time at which the certificate was revoked. This value exists only when the certificate status is REVOKED.

" } }, "documentation":"

This structure is returned in the response object of the ListCertificates action.

" @@ -609,7 +676,7 @@ }, "Passphrase":{ "shape":"PassphraseBlob", - "documentation":"

Passphrase to associate with the encrypted exported private key. If you want to later decrypt the private key, you must have the passphrase. You can use the following OpenSSL command to decrypt a private key:

openssl rsa -in encrypted_key.pem -out decrypted_key.pem

" + "documentation":"

Passphrase to associate with the encrypted exported private key.

When creating your passphrase, you can use any ASCII character except #, $, or %.

If you want to later decrypt the private key, you must have the passphrase. You can use the following OpenSSL command to decrypt a private key. After entering the command, you are prompted for the passphrase.

openssl rsa -in encrypted_key.pem -out decrypted_key.pem

" } } }, @@ -669,6 +736,10 @@ "CUSTOM" ] }, + "ExtendedKeyUsageNames":{ + "type":"list", + "member":{"shape":"ExtendedKeyUsageName"} + }, "FailureReason":{ "type":"string", "enum":[ @@ -704,7 +775,7 @@ }, "keyTypes":{ "shape":"KeyAlgorithmList", - "documentation":"

Specify one or more algorithms that can be used to generate key pairs.

Default filtering returns only RSA_1024 and RSA_2048 certificates that have at least one domain. To return other certificate types, provide the desired type signatures in a comma-separated list. For example, \"keyTypes\": [\"RSA_2048,RSA_4096\"] returns both RSA_2048 and RSA_4096 certificates.

" + "documentation":"

Specify one or more algorithms that can be used to generate key pairs.

Default filtering returns only RSA_1024 and RSA_2048 certificates that have at least one domain. To return other certificate types, provide the desired type signatures in a comma-separated list. For example, \"keyTypes\": [\"RSA_2048\",\"RSA_4096\"] returns both RSA_2048 and RSA_4096 certificates.

" } }, "documentation":"

This structure can be used in the ListCertificates action to filter the output of the certificate list.

" @@ -887,6 +958,10 @@ "CUSTOM" ] }, + "KeyUsageNames":{ + "type":"list", + "member":{"shape":"KeyUsageName"} + }, "LimitExceededException":{ "type":"structure", "members":{ @@ -913,6 +988,14 @@ "MaxItems":{ "shape":"MaxItems", "documentation":"

Use this parameter when paginating results to specify the maximum number of items to return in the response. If additional items exist beyond the number you specify, the NextToken element is sent in the response. Use this NextToken value in a subsequent request to retrieve additional items.

" + }, + "SortBy":{ + "shape":"SortBy", + "documentation":"

Specifies the field to sort results by. If you specify SortBy, you must also specify SortOrder.

" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

Specifies the order of sorted results. If you specify SortOrder, you must also specify SortBy.

" } } }, @@ -959,6 +1042,7 @@ "min":1, "pattern":"[\\u0009\\u000A\\u000D\\u0020-\\u00FF]*" }, + "NullableBoolean":{"type":"boolean"}, "PassphraseBlob":{ "type":"blob", "max":128, @@ -1076,7 +1160,7 @@ "members":{ "DomainName":{ "shape":"DomainNameString", - "documentation":"

Fully qualified domain name (FQDN), such as www.example.com, that you want to secure with an ACM certificate. Use an asterisk (*) to create a wildcard certificate that protects several sites in the same domain. For example, *.example.com protects www.example.com, site.example.com, and images.example.com.

The first domain name you enter cannot exceed 64 octets, including periods. Each subsequent Subject Alternative Name (SAN), however, can be up to 253 octets in length.

" + "documentation":"

Fully qualified domain name (FQDN), such as www.example.com, that you want to secure with an ACM certificate. Use an asterisk (*) to create a wildcard certificate that protects several sites in the same domain. For example, *.example.com protects www.example.com, site.example.com, and images.example.com.

In compliance with RFC 5280, the length of the domain name (technically, the Common Name) that you provide cannot exceed 64 octets (characters), including periods. To add a longer domain name, specify it in the Subject Alternative Name field, which supports names up to 253 octets in length.

" }, "ValidationMethod":{ "shape":"ValidationMethod", @@ -1100,7 +1184,7 @@ }, "CertificateAuthorityArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the private certificate authority (CA) that will be used to issue the certificate. If you do not provide an ARN and you are trying to request a private certificate, ACM will attempt to issue a public certificate. For more information about private CAs, see the Amazon Web Services Certificate Manager Private Certificate Authority (PCA) user guide. The ARN must have the following form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" + "documentation":"

The Amazon Resource Name (ARN) of the private certificate authority (CA) that will be used to issue the certificate. If you do not provide an ARN and you are trying to request a private certificate, ACM will attempt to issue a public certificate. For more information about private CAs, see the Certificate Manager Private Certificate Authority user guide. The ARN must have the following form:

arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012

" }, "Tags":{ "shape":"TagList", @@ -1202,6 +1286,17 @@ ] }, "ServiceErrorMessage":{"type":"string"}, + "SortBy":{ + "type":"string", + "enum":["CREATED_AT"] + }, + "SortOrder":{ + "type":"string", + "enum":[ + "ASCENDING", + "DESCENDING" + ] + }, "String":{"type":"string"}, "TStamp":{"type":"timestamp"}, "Tag":{ @@ -1301,5 +1396,5 @@ ] } }, - "documentation":"Amazon Web Services Certificate Manager

You can use Amazon Web Services Certificate Manager (ACM) to manage SSL/TLS certificates for your Amazon Web Services-based websites and applications. For more information about using ACM, see the Amazon Web Services Certificate Manager User Guide.

" + "documentation":"Certificate Manager

You can use Certificate Manager (ACM) to manage SSL/TLS certificates for your Amazon Web Services-based websites and applications. For more information about using ACM, see the Certificate Manager User Guide.

" } diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index 03cd2e8701..db467fce42 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -32212,7 +32212,8 @@ "r6a.32xlarge", "r6a.48xlarge", "r6a.metal", - "p4de.24xlarge" + "p4de.24xlarge", + "u-3tb1.56xlarge" ] }, "InstanceTypeHypervisor":{ diff --git a/botocore/data/emr-serverless/2021-07-13/service-2.json b/botocore/data/emr-serverless/2021-07-13/service-2.json index a9b421f87f..c6f2c41793 100644 --- a/botocore/data/emr-serverless/2021-07-13/service-2.json +++ b/botocore/data/emr-serverless/2021-07-13/service-2.json @@ -79,6 +79,22 @@ ], "documentation":"

Displays detailed information about a specified application.

" }, + "GetDashboardForJobRun":{ + "name":"GetDashboardForJobRun", + "http":{ + "method":"GET", + "requestUri":"/applications/{applicationId}/jobruns/{jobRunId}/dashboard", + "responseCode":200 + }, + "input":{"shape":"GetDashboardForJobRunRequest"}, + "output":{"shape":"GetDashboardForJobRunResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Returns a URL to access the job run dashboard.

" + }, "GetJobRun":{ "name":"GetJobRun", "http":{ @@ -430,8 +446,7 @@ }, "idleTimeoutMinutes":{ "shape":"AutoStopConfigIdleTimeoutMinutesInteger", - "documentation":"

The amount of idle time in minutes after which your application will automatically stop. Defaults to 15 minutes.

", - "box":true + "documentation":"

The amount of idle time in minutes after which your application will automatically stop. Defaults to 15 minutes.

" } }, "documentation":"

The configuration for an application to automatically stop after a certain amount of time being idle.

" @@ -704,6 +719,36 @@ } } }, + "GetDashboardForJobRunRequest":{ + "type":"structure", + "required":[ + "applicationId", + "jobRunId" + ], + "members":{ + "applicationId":{ + "shape":"ApplicationId", + "documentation":"

The ID of the application.

", + "location":"uri", + "locationName":"applicationId" + }, + "jobRunId":{ + "shape":"JobRunId", + "documentation":"

The ID of the job run.

", + "location":"uri", + "locationName":"jobRunId" + } + } + }, + "GetDashboardForJobRunResponse":{ + "type":"structure", + "members":{ + "url":{ + "shape":"Url", + "documentation":"

The URL to view the job run's dashboard.

" + } + } + }, "GetJobRunRequest":{ "type":"structure", "required":[ @@ -1024,7 +1069,6 @@ "maxResults":{ "shape":"ListApplicationsRequestMaxResultsInteger", "documentation":"

The maximum number of applications that can be listed.

", - "box":true, "location":"querystring", "locationName":"maxResults" }, @@ -1075,7 +1119,6 @@ "maxResults":{ "shape":"ListJobRunsRequestMaxResultsInteger", "documentation":"

The maximum number of job runs that can be listed.

", - "box":true, "location":"querystring", "locationName":"maxResults" }, @@ -1599,6 +1642,11 @@ "min":1, "pattern":".*[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDBFF-\\uDC00\\uDFFF\\r\\n\\t]*.*" }, + "Url":{ + "type":"string", + "max":2048, + "min":1 + }, "ValidationException":{ "type":"structure", "required":["message"], diff --git a/botocore/data/fsx/2018-03-01/service-2.json b/botocore/data/fsx/2018-03-01/service-2.json index 315a2a0797..9844b69853 100644 --- a/botocore/data/fsx/2018-03-01/service-2.json +++ b/botocore/data/fsx/2018-03-01/service-2.json @@ -107,7 +107,7 @@ {"shape":"ServiceLimitExceeded"}, {"shape":"InternalServerError"} ], - "documentation":"

Creates an Amazon FSx for Lustre data repository association (DRA). A data repository association is a link between a directory on the file system and an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository associations on a file system. Data repository associations are supported only for file systems with the Persistent_2 deployment type.

Each data repository association must have a unique Amazon FSx file system directory and a unique S3 bucket or prefix associated with it. You can configure a data repository association for automatic import only, for automatic export only, or for both. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

", + "documentation":"

Creates an Amazon FSx for Lustre data repository association (DRA). A data repository association is a link between a directory on the file system and an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository associations on a file system. Data repository associations are supported only for file systems with the Persistent_2 deployment type.

Each data repository association must have a unique Amazon FSx file system directory and a unique S3 bucket or prefix associated with it. You can configure a data repository association for automatic import only, for automatic export only, or for both. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

CreateDataRepositoryAssociation isn't supported on Amazon File Cache resources. To create a DRA on Amazon File Cache, use the CreateFileCache operation.

", "idempotent":true }, "CreateDataRepositoryTask":{ @@ -130,6 +130,26 @@ "documentation":"

Creates an Amazon FSx for Lustre data repository task. You use data repository tasks to perform bulk operations between your Amazon FSx file system and its linked data repositories. An example of a data repository task is exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to a linked data repository. A CreateDataRepositoryTask operation will fail if a data repository is not linked to the FSx file system. To learn more about data repository tasks, see Data Repository Tasks. To learn more about linking a data repository to your file system, see Linking your file system to an S3 bucket.

", "idempotent":true }, + "CreateFileCache":{ + "name":"CreateFileCache", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateFileCacheRequest"}, + "output":{"shape":"CreateFileCacheResponse"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"IncompatibleParameterError"}, + {"shape":"InvalidNetworkSettings"}, + {"shape":"InvalidPerUnitStorageThroughput"}, + {"shape":"ServiceLimitExceeded"}, + {"shape":"InternalServerError"}, + {"shape":"MissingFileCacheConfiguration"} + ], + "documentation":"

Creates a new Amazon File Cache resource.

You can use this operation with a client request token in the request that Amazon File Cache uses to ensure idempotent creation. If a cache with the specified client request token exists and the parameters match, CreateFileCache returns the description of the existing cache. If a cache with the specified client request token exists and the parameters don't match, this call returns IncompatibleParameterError. If a file cache with the specified client request token doesn't exist, CreateFileCache does the following:

The CreateFileCache call returns while the cache's lifecycle state is still CREATING. You can check the cache creation status by calling the DescribeFileCaches operation, which returns the cache state along with other information.

", + "idempotent":true + }, "CreateFileSystem":{ "name":"CreateFileSystem", "http":{ @@ -150,7 +170,7 @@ {"shape":"InternalServerError"}, {"shape":"MissingFileSystemConfiguration"} ], - "documentation":"

Creates a new, empty Amazon FSx file system. You can create the following supported Amazon FSx file systems using the CreateFileSystem API operation:

This operation requires a client request token in the request that Amazon FSx uses to ensure idempotent creation. This means that calling the operation multiple times with the same client request token has no effect. By using the idempotent operation, you can retry a CreateFileSystem operation without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same.

If a file system with the specified client request token exists and the parameters match, CreateFileSystem returns the description of the existing file system. If a file system with the specified client request token exists and the parameters don't match, this call returns IncompatibleParameterError. If a file system with the specified client request token doesn't exist, CreateFileSystem does the following:

This operation requires a client request token in the request that Amazon FSx uses to ensure idempotent creation. This means that calling the operation multiple times with the same client request token has no effect. By using the idempotent operation, you can retry a CreateFileSystem operation without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport-level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives a success message as long as the parameters are the same.

The CreateFileSystem call returns while the file system's lifecycle state is still CREATING. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information.

" + "documentation":"

Creates a new, empty Amazon FSx file system. You can create the following supported Amazon FSx file systems using the CreateFileSystem API operation:

This operation requires a client request token in the request that Amazon FSx uses to ensure idempotent creation. This means that calling the operation multiple times with the same client request token has no effect. By using the idempotent operation, you can retry a CreateFileSystem operation without the risk of creating an extra file system. This approach can be useful when an initial call fails in a way that makes it unclear whether a file system was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a file system, the client receives success as long as the parameters are the same.

If a file system with the specified client request token exists and the parameters match, CreateFileSystem returns the description of the existing file system. If a file system with the specified client request token exists and the parameters don't match, this call returns IncompatibleParameterError. If a file system with the specified client request token doesn't exist, CreateFileSystem does the following:

The CreateFileSystem call returns while the file system's lifecycle state is still CREATING. You can check the file-system creation status by calling the DescribeFileSystems operation, which returns the file system state along with other information.

" }, "CreateFileSystemFromBackup":{ "name":"CreateFileSystemFromBackup", @@ -287,6 +307,24 @@ "documentation":"

Deletes a data repository association on an Amazon FSx for Lustre file system. Deleting the data repository association unlinks the file system from the Amazon S3 bucket. When deleting a data repository association, you have the option of deleting the data in the file system that corresponds to the data repository association. Data repository associations are supported only for file systems with the Persistent_2 deployment type.

", "idempotent":true }, + "DeleteFileCache":{ + "name":"DeleteFileCache", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteFileCacheRequest"}, + "output":{"shape":"DeleteFileCacheResponse"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"IncompatibleParameterError"}, + {"shape":"FileCacheNotFound"}, + {"shape":"ServiceLimitExceeded"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Deletes an Amazon File Cache resource. After deletion, the cache no longer exists, and its data is gone.

The DeleteFileCache operation returns while the cache has the DELETING status. You can check the cache deletion status by calling the DescribeFileCaches operation, which returns a list of caches in your account. If you pass the cache ID for a deleted cache, the DescribeFileCaches operation returns a FileCacheNotFound error.

The data in a deleted cache is also deleted and can't be recovered by any means.

", + "idempotent":true + }, "DeleteFileSystem":{ "name":"DeleteFileSystem", "http":{ @@ -385,7 +423,7 @@ {"shape":"InvalidDataRepositoryType"}, {"shape":"InternalServerError"} ], - "documentation":"

Returns the description of specific Amazon FSx for Lustre data repository associations, if one or more AssociationIds values are provided in the request, or if filters are used in the request. Data repository associations are supported only for file systems with the Persistent_2 deployment type.

You can use filters to narrow the response to include just data repository associations for specific file systems (use the file-system-id filter with the ID of the file system) or data repository associations for a specific repository type (use the data-repository-type filter with a value of S3). If you don't use filters, the response returns all data repository associations owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.

When retrieving all data repository associations, you can paginate the response by using the optional MaxResults parameter to limit the number of data repository associations returned in a response. If more data repository associations remain, Amazon FSx returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

", + "documentation":"

Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository associations, if one or more AssociationIds values are provided in the request, or if filters are used in the request. Data repository associations are supported only for Amazon FSx for Lustre file systems with the Persistent_2 deployment type and for Amazon File Cache resources.

You can use filters to narrow the response to include just data repository associations for specific file systems (use the file-system-id filter with the ID of the file system) or caches (use the file-cache-id filter with the ID of the cache), or data repository associations for a specific repository type (use the data-repository-type filter with a value of S3 or NFS). If you don't use filters, the response returns all data repository associations owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.

When retrieving all data repository associations, you can paginate the response by using the optional MaxResults parameter to limit the number of data repository associations returned in a response. If more data repository associations remain, a NextToken value is returned in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

", "idempotent":true }, "DescribeDataRepositoryTasks":{ @@ -402,7 +440,23 @@ {"shape":"DataRepositoryTaskNotFound"}, {"shape":"InternalServerError"} ], - "documentation":"

Returns the description of specific Amazon FSx for Lustre data repository tasks, if one or more TaskIds values are provided in the request, or if filters are used in the request. You can use filters to narrow the response to include just tasks for specific file systems, or tasks in a specific lifecycle state. Otherwise, it returns all data repository tasks owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.

When retrieving all tasks, you can paginate the response by using the optional MaxResults parameter to limit the number of tasks returned in a response. If more tasks remain, Amazon FSx returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

" + "documentation":"

Returns the description of specific Amazon FSx for Lustre or Amazon File Cache data repository tasks, if one or more TaskIds values are provided in the request, or if filters are used in the request. You can use filters to narrow the response to include just tasks for specific file systems or caches, or tasks in a specific lifecycle state. Otherwise, it returns all data repository tasks owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.

When retrieving all tasks, you can paginate the response by using the optional MaxResults parameter to limit the number of tasks returned in a response. If more tasks remain, a NextToken value is returned in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

" + }, + "DescribeFileCaches":{ + "name":"DescribeFileCaches", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFileCachesRequest"}, + "output":{"shape":"DescribeFileCachesResponse"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"FileCacheNotFound"}, + {"shape":"InternalServerError"} + ], + "documentation":"

Returns the description of a specific Amazon File Cache resource, if a FileCacheIds value is provided for that cache. Otherwise, it returns descriptions of all caches owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint that you're calling.

When retrieving all cache descriptions, you can optionally specify the MaxResults parameter to limit the number of descriptions in a response. If more cache descriptions remain, the operation returns a NextToken value in the response. In this case, send a later request with the NextToken request parameter set to the value of NextToken from the last response.

This operation is used in an iterative process to retrieve a list of your cache descriptions. DescribeFileCaches is called first without a NextToken value. Then the operation continues to be called with the NextToken parameter set to the value of the last NextToken value until a response has no NextToken.

When using this operation, keep the following in mind:

", + "idempotent":true }, "DescribeFileSystemAliases":{ "name":"DescribeFileSystemAliases", @@ -599,6 +653,26 @@ "documentation":"

Updates the configuration of an existing data repository association on an Amazon FSx for Lustre file system. Data repository associations are supported only for file systems with the Persistent_2 deployment type.

", "idempotent":true }, + "UpdateFileCache":{ + "name":"UpdateFileCache", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateFileCacheRequest"}, + "output":{"shape":"UpdateFileCacheResponse"}, + "errors":[ + {"shape":"BadRequest"}, + {"shape":"UnsupportedOperation"}, + {"shape":"IncompatibleParameterError"}, + {"shape":"InternalServerError"}, + {"shape":"FileCacheNotFound"}, + {"shape":"MissingFileCacheConfiguration"}, + {"shape":"ServiceLimitExceeded"} + ], + "documentation":"

Updates the configuration of an existing Amazon File Cache resource. You can update multiple properties in a single request.

", + "idempotent":true + }, "UpdateFileSystem":{ "name":"UpdateFileSystem", "http":{ @@ -870,10 +944,10 @@ "members":{ "Events":{ "shape":"EventTypes", - "documentation":"

The AutoExportPolicy can have the following event values:

You can define any combination of event types for your AutoExportPolicy.

" + "documentation":"

The AutoExportPolicy can have the following event values:

You can define any combination of event types for your AutoExportPolicy.

" } }, - "documentation":"

Describes a data repository association's automatic export policy. The AutoExportPolicy defines the types of updated objects on the file system that will be automatically exported to the data repository. As you create, modify, or delete files, Amazon FSx automatically exports the defined changes asynchronously once your application finishes modifying the file.

This AutoExportPolicy is supported only for file systems with the Persistent_2 deployment type.

" + "documentation":"

Describes a data repository association's automatic export policy. The AutoExportPolicy defines the types of updated objects on the file system that will be automatically exported to the data repository. As you create, modify, or delete files, Amazon FSx for Lustre automatically exports the defined changes asynchronously once your application finishes modifying the file.

This AutoExportPolicy is supported only for Amazon FSx for Lustre file systems with the Persistent_2 deployment type.

" }, "AutoImportPolicy":{ "type":"structure", @@ -883,7 +957,7 @@ "documentation":"

The AutoImportPolicy can have the following event values:

You can define any combination of event types for your AutoImportPolicy.

" } }, - "documentation":"

Describes the data repository association's automatic import policy. The AutoImportPolicy defines how Amazon FSx keeps your file metadata and directory listings up to date by importing changes to your file system as you modify objects in a linked S3 bucket.

This AutoImportPolicy is supported only for file systems with the Persistent_2 deployment type.

" + "documentation":"

Describes the data repository association's automatic import policy. The AutoImportPolicy defines how Amazon FSx keeps your file metadata and directory listings up to date by importing changes to your Amazon FSx for Lustre file system as you modify objects in a linked S3 bucket.

The AutoImportPolicy is supported only for Amazon FSx for Lustre file systems with the Persistent_2 deployment type.

" }, "AutoImportPolicyType":{ "type":"string", @@ -1086,6 +1160,11 @@ } } }, + "CapacityToRelease":{ + "type":"long", + "max":2147483647, + "min":1 + }, "ClientRequestToken":{ "type":"string", "documentation":"

(Optional) An idempotency token for resource creation, in a string of up to 64 ASCII characters. This token is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.

", @@ -1151,6 +1230,7 @@ "Backup":{"shape":"Backup"} } }, + "CopyTagsToDataRepositoryAssociations":{"type":"boolean"}, "CreateBackupRequest":{ "type":"structure", "members":{ @@ -1188,14 +1268,13 @@ "type":"structure", "required":[ "FileSystemId", - "FileSystemPath", "DataRepositoryPath" ], "members":{ "FileSystemId":{"shape":"FileSystemId"}, "FileSystemPath":{ "shape":"Namespace", - "documentation":"

A path on the file system that points to a high-level directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) that will be mapped 1-1 with DataRepositoryPath. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path /ns1/, then you cannot link another data repository with file system path /ns1/ns2.

This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.

If you specify only a forward slash (/) as the file system path, you can link only 1 data repository to the file system. You can only specify \"/\" as the file system path for the first data repository associated with a file system.

" + "documentation":"

A path on the file system that points to a high-level directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) that will be mapped 1-1 with DataRepositoryPath. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path /ns1/, then you cannot link another data repository with file system path /ns1/ns2.

This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.

If you specify only a forward slash (/) as the file system path, you can link only one data repository to the file system. You can only specify \"/\" as the file system path for the first data repository associated with a file system.

" }, "DataRepositoryPath":{ "shape":"ArchivePath", @@ -1254,7 +1333,11 @@ "shape":"ClientRequestToken", "idempotencyToken":true }, - "Tags":{"shape":"Tags"} + "Tags":{"shape":"Tags"}, + "CapacityToRelease":{ + "shape":"CapacityToRelease", + "documentation":"

Specifies the amount of data to release, in GiB, by an Amazon File Cache AUTO_RELEASE_DATA task that automatically releases files from the cache.

" + } } }, "CreateDataRepositoryTaskResponse":{ @@ -1266,6 +1349,94 @@ } } }, + "CreateFileCacheDataRepositoryAssociations":{ + "type":"list", + "member":{"shape":"FileCacheDataRepositoryAssociation"}, + "max":8 + }, + "CreateFileCacheLustreConfiguration":{ + "type":"structure", + "required":[ + "PerUnitStorageThroughput", + "DeploymentType", + "MetadataConfiguration" + ], + "members":{ + "PerUnitStorageThroughput":{ + "shape":"PerUnitStorageThroughput", + "documentation":"

Provisions the amount of read and write throughput for each 1 tebibyte (TiB) of cache storage capacity, in MB/s/TiB. The only supported value is 1000.

" + }, + "DeploymentType":{ + "shape":"FileCacheLustreDeploymentType", + "documentation":"

Specifies the cache deployment type, which must be CACHE_1.

" + }, + "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"}, + "MetadataConfiguration":{ + "shape":"FileCacheLustreMetadataConfiguration", + "documentation":"

The configuration for a Lustre MDT (Metadata Target) storage volume.

" + } + }, + "documentation":"

The Amazon File Cache configuration for the cache that you are creating.

" + }, + "CreateFileCacheRequest":{ + "type":"structure", + "required":[ + "FileCacheType", + "FileCacheTypeVersion", + "StorageCapacity", + "SubnetIds" + ], + "members":{ + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "documentation":"

An idempotency token for resource creation, in a string of up to 64 ASCII characters. This token is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.

By using the idempotent operation, you can retry a CreateFileCache operation without the risk of creating an extra cache. This approach can be useful when an initial call fails in a way that makes it unclear whether a cache was created. Examples are if a transport level timeout occurred, or your connection was reset. If you use the same client request token and the initial call created a cache, the client receives success as long as the parameters are the same.

", + "idempotencyToken":true + }, + "FileCacheType":{ + "shape":"FileCacheType", + "documentation":"

The type of cache that you're creating, which must be LUSTRE.

" + }, + "FileCacheTypeVersion":{ + "shape":"FileSystemTypeVersion", + "documentation":"

Sets the Lustre version for the cache that you're creating, which must be 2.12.

" + }, + "StorageCapacity":{ + "shape":"StorageCapacity", + "documentation":"

The storage capacity of the cache in gibibytes (GiB). Valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB.

" + }, + "SubnetIds":{"shape":"SubnetIds"}, + "SecurityGroupIds":{ + "shape":"SecurityGroupIds", + "documentation":"

A list of IDs specifying the security groups to apply to all network interfaces created for Amazon File Cache access. This list isn't returned in later requests to describe the cache.

" + }, + "Tags":{"shape":"Tags"}, + "CopyTagsToDataRepositoryAssociations":{ + "shape":"CopyTagsToDataRepositoryAssociations", + "documentation":"

A boolean flag indicating whether tags for the cache should be copied to data repository associations. This value defaults to false.

" + }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

Specifies the ID of the Key Management Service (KMS) key to use for encrypting data on an Amazon File Cache. If a KmsKeyId isn't specified, the Amazon FSx-managed KMS key for your account is used. For more information, see Encrypt in the Key Management Service API Reference.

" + }, + "LustreConfiguration":{ + "shape":"CreateFileCacheLustreConfiguration", + "documentation":"

The configuration for the Amazon File Cache resource being created.

" + }, + "DataRepositoryAssociations":{ + "shape":"CreateFileCacheDataRepositoryAssociations", + "documentation":"

A list of up to 8 configurations for data repository associations (DRAs) to be created during the cache creation. The DRAs link the cache to either an Amazon S3 data repository or a Network File System (NFS) data repository that supports the NFSv3 protocol.

The DRA configurations must meet the following requirements:

DRA automatic import and automatic export are not supported.

" + } + } + }, + "CreateFileCacheResponse":{ + "type":"structure", + "members":{ + "FileCache":{ + "shape":"FileCacheCreating", + "documentation":"

A description of the cache that was created.
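As an aside for readers of this model change: the CreateFileCache request shape above maps to a create_file_cache call on the boto3/botocore FSx client once these models are installed. The following is a minimal sketch, not an authoritative example; the subnet and bucket identifiers are placeholders, and the numeric values simply reuse the documented values (1200 GiB storage, 1000 MB/s/TiB throughput, 2400 GiB metadata capacity).

    import boto3

    fsx = boto3.client("fsx")

    # Sketch of a CreateFileCache call using only members defined in this model.
    response = fsx.create_file_cache(
        FileCacheType="LUSTRE",
        FileCacheTypeVersion="2.12",
        StorageCapacity=1200,                    # GiB; 1200, 2400, or increments of 2400
        SubnetIds=["subnet-0123456789abcdef0"],  # placeholder subnet ID
        LustreConfiguration={
            "PerUnitStorageThroughput": 1000,    # only supported value
            "DeploymentType": "CACHE_1",
            "MetadataConfiguration": {"StorageCapacity": 2400},
        },
        DataRepositoryAssociations=[
            {
                # S3 DRA: the cache path cannot be root (/) for an S3 repository.
                "FileCachePath": "/ns1/",
                "DataRepositoryPath": "s3://example-bucket/prefix/",  # placeholder bucket
            }
        ],
    )
    cache = response["FileCache"]
    print(cache["FileCacheId"], cache["Lifecycle"])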

" + } + } + }, "CreateFileSystemFromBackupRequest":{ "type":"structure", "required":[ @@ -1308,6 +1479,10 @@ "OpenZFSConfiguration":{ "shape":"CreateFileSystemOpenZFSConfiguration", "documentation":"

The OpenZFS configuration for the file system that's being created.

" + }, + "StorageCapacity":{ + "shape":"StorageCapacity", + "documentation":"

Sets the storage capacity of the OpenZFS file system that you're creating from a backup, in gibibytes (GiB). Valid values are from 64 GiB up to 524,288 GiB (512 TiB). However, the value that you specify must be equal to or greater than the backup's storage capacity value. If you don't use the StorageCapacity parameter, the default is the backup's StorageCapacity value.

If used to create a file system other than OpenZFS, you must provide a value that matches the backup's StorageCapacity value. If you provide any other value, Amazon FSx responds with a 400 Bad Request.

" } }, "documentation":"

The request object for the CreateFileSystemFromBackup operation.

" @@ -1824,7 +1999,7 @@ "documentation":"

The Domain Name Service (DNS) name for the file system. You can mount your file system using its DNS name.

", "max":275, "min":16, - "pattern":"^(fsi?-[0-9a-f]{8,}\\..{4,253})$" + "pattern":"^((fs|fc)i?-[0-9a-f]{8,}\\..{4,253})$" }, "DailyTime":{ "type":"string", @@ -1851,33 +2026,49 @@ "FileSystemId":{"shape":"FileSystemId"}, "Lifecycle":{ "shape":"DataRepositoryLifecycle", - "documentation":"

Describes the state of a data repository association. The lifecycle can have the following values:

" + "documentation":"

Describes the state of a data repository association. The lifecycle can have the following values:

" }, "FailureDetails":{"shape":"DataRepositoryFailureDetails"}, "FileSystemPath":{ "shape":"Namespace", - "documentation":"

A path on the file system that points to a high-level directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) that will be mapped 1-1 with DataRepositoryPath. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path /ns1/, then you cannot link another data repository with file system path /ns1/ns2.

This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.

If you specify only a forward slash (/) as the file system path, you can link only 1 data repository to the file system. You can only specify \"/\" as the file system path for the first data repository associated with a file system.

" + "documentation":"

A path on the Amazon FSx for Lustre file system that points to a high-level directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) that will be mapped 1-1 with DataRepositoryPath. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path /ns1/, then you cannot link another data repository with file system path /ns1/ns2.

This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory.

If you specify only a forward slash (/) as the file system path, you can link only one data repository to the file system. You can only specify \"/\" as the file system path for the first data repository associated with a file system.

" }, "DataRepositoryPath":{ "shape":"ArchivePath", - "documentation":"

The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to.

" + "documentation":"

The path to the data repository that will be linked to the cache or file system.

" }, "BatchImportMetaDataOnCreate":{ "shape":"BatchImportMetaDataOnCreate", - "documentation":"

A boolean flag indicating whether an import data repository task to import metadata should run after the data repository association is created. The task runs if this flag is set to true.

" + "documentation":"

A boolean flag indicating whether an import data repository task to import metadata should run after the data repository association is created. The task runs if this flag is set to true.

BatchImportMetaDataOnCreate is not supported for data repositories linked to an Amazon File Cache resource.

" }, "ImportedFileChunkSize":{ "shape":"Megabytes", - "documentation":"

For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system.

The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.

" + "documentation":"

For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system or cache.

The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB.

" }, "S3":{ "shape":"S3DataRepositoryConfiguration", - "documentation":"

The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository.

" + "documentation":"

The configuration for an Amazon S3 data repository linked to an Amazon FSx for Lustre file system with a data repository association.

" }, "Tags":{"shape":"Tags"}, - "CreationTime":{"shape":"CreationTime"} + "CreationTime":{"shape":"CreationTime"}, + "FileCacheId":{ + "shape":"FileCacheId", + "documentation":"

The globally unique ID of the Amazon File Cache resource.

" + }, + "FileCachePath":{ + "shape":"Namespace", + "documentation":"

A path on the Amazon File Cache that points to a high-level directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) that will be mapped 1-1 with DataRepositoryPath. The leading forward slash in the path is required. Two data repository associations cannot have overlapping cache paths. For example, if a data repository is associated with cache path /ns1/, then you cannot link another data repository with cache path /ns1/ns2.

This path specifies the directory in your cache where files will be exported from. This cache directory can be linked to only one data repository (S3 or NFS) and no other data repository can be linked to the directory.

The cache path can only be set to root (/) on an NFS DRA when DataRepositorySubdirectories is specified. If you specify root (/) as the cache path, you can create only one DRA on the cache.

The cache path cannot be set to root (/) for an S3 DRA.

" + }, + "DataRepositorySubdirectories":{ + "shape":"SubDirectoriesPaths", + "documentation":"

For Amazon File Cache, a list of NFS Exports that will be linked with an NFS data repository association. All the subdirectories must be on a single NFS file system. The Export paths are in the format /exportpath1. To use this parameter, you must configure DataRepositoryPath as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that DataRepositorySubdirectories is not supported for S3 data repositories.

" + }, + "NFS":{ + "shape":"NFSDataRepositoryConfiguration", + "documentation":"

The configuration for an NFS data repository linked to an Amazon File Cache resource with a data repository association.

" + } }, - "documentation":"

The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket. The data repository association configuration object is returned in the response of the following operations:

Data repository associations are supported only for file systems with the Persistent_2 deployment type.

" + "documentation":"

The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket, or an Amazon File Cache resource to an Amazon S3 bucket or an NFS file system. The data repository association configuration object is returned in the response of the following operations:

Data repository associations are supported only for an Amazon FSx for Lustre file system with the Persistent_2 deployment type and for an Amazon File Cache resource.

" }, "DataRepositoryAssociationId":{ "type":"string", @@ -1954,8 +2145,7 @@ "TaskId", "Lifecycle", "Type", - "CreationTime", - "FileSystemId" + "CreationTime" ], "members":{ "TaskId":{ @@ -1964,27 +2154,30 @@ }, "Lifecycle":{ "shape":"DataRepositoryTaskLifecycle", - "documentation":"

The lifecycle status of the data repository task, as follows:

You cannot delete an FSx for Lustre file system if there are data repository tasks for the file system in the PENDING or EXECUTING states. Please retry when the data repository task is finished (with a status of CANCELED, SUCCEEDED, or FAILED). You can use the DescribeDataRepositoryTask action to monitor the task status. Contact the FSx team if you need to delete your file system immediately.

" + "documentation":"

The lifecycle status of the data repository task, as follows:

You cannot delete an FSx for Lustre file system if there are data repository tasks for the file system in the PENDING or EXECUTING states. Please retry when the data repository task is finished (with a status of CANCELED, SUCCEEDED, or FAILED). You can use the DescribeDataRepositoryTask action to monitor the task status. Contact the FSx team if you need to delete your file system immediately.

" }, "Type":{ "shape":"DataRepositoryTaskType", - "documentation":"

The type of data repository task.

" + "documentation":"

The type of data repository task.

" }, "CreationTime":{"shape":"CreationTime"}, "StartTime":{ "shape":"StartTime", - "documentation":"

The time that Amazon FSx began processing the task.

" + "documentation":"

The time the system began processing the task.

" }, "EndTime":{ "shape":"EndTime", - "documentation":"

The time that Amazon FSx completed processing the task, populated after the task is complete.

" + "documentation":"

The time the system completed processing the task, populated after the task is complete.

" }, "ResourceARN":{"shape":"ResourceARN"}, "Tags":{"shape":"Tags"}, - "FileSystemId":{"shape":"FileSystemId"}, + "FileSystemId":{ + "shape":"FileSystemId", + "documentation":"

The globally unique ID of the file system.

" + }, "Paths":{ "shape":"DataRepositoryTaskPaths", - "documentation":"

An array of paths on the Amazon FSx for Lustre file system that specify the data for the data repository task to process. For example, in an EXPORT_TO_REPOSITORY task, the paths specify which data to export to the linked data repository.

(Default) If Paths is not specified, Amazon FSx uses the file system root directory.

" + "documentation":"

An array of paths that specify the data for the data repository task to process. For example, in an EXPORT_TO_REPOSITORY task, the paths specify which data to export to the linked data repository.

(Default) If Paths is not specified, Amazon FSx uses the file system root directory.

" }, "FailureDetails":{ "shape":"DataRepositoryTaskFailureDetails", @@ -1994,9 +2187,17 @@ "shape":"DataRepositoryTaskStatus", "documentation":"

Provides the status of the number of files that the task has processed successfully and failed to process.

" }, - "Report":{"shape":"CompletionReport"} + "Report":{"shape":"CompletionReport"}, + "CapacityToRelease":{ + "shape":"CapacityToRelease", + "documentation":"

Specifies the amount of data to release, in GiB, by an Amazon File Cache AUTO_RELEASE_DATA task that automatically releases files from the cache.

" + }, + "FileCacheId":{ + "shape":"FileCacheId", + "documentation":"

The system-generated, unique ID of the cache.

" + } }, - "documentation":"

A description of the data repository task. You use data repository tasks to perform bulk transfer operations between your Amazon FSx file system and a linked data repository.

" + "documentation":"

A description of the data repository task. You use data repository tasks to perform bulk transfer operations between an Amazon FSx for Lustre file system and a linked data repository. An Amazon File Cache resource uses a task to automatically release files from the cache.

" }, "DataRepositoryTaskEnded":{ "type":"structure", @@ -2040,7 +2241,8 @@ "enum":[ "file-system-id", "task-lifecycle", - "data-repository-association-id" + "data-repository-association-id", + "file-cache-id" ] }, "DataRepositoryTaskFilterValue":{ @@ -2107,6 +2309,10 @@ "LastUpdatedTime":{ "shape":"LastUpdatedTime", "documentation":"

The time at which the task status was last updated.

" + }, + "ReleasedCapacity":{ + "shape":"ReleasedCapacity", + "documentation":"

The total amount of data, in GiB, released by an Amazon File Cache AUTO_RELEASE_DATA task that automatically releases files from the cache.
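For orientation, the new ReleasedCapacity field and the file-cache-id task filter added in this hunk can be read back through the existing DescribeDataRepositoryTasks operation (not shown in this hunk). A hedged sketch, assuming a cache ID is already known; the ID is a placeholder:

    import boto3

    fsx = boto3.client("fsx")

    # List data repository tasks for one cache and report how much data each released.
    tasks = fsx.describe_data_repository_tasks(
        Filters=[{"Name": "file-cache-id", "Values": ["fc-0123456789abcdef0"]}]  # placeholder ID
    )
    for task in tasks["DataRepositoryTasks"]:
        released = task.get("Status", {}).get("ReleasedCapacity")
        print(task["TaskId"], task["Type"], task["Lifecycle"], released)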

" } }, "documentation":"

Provides the task status showing a running total of the total number of files to be processed, the number successfully processed, and the number of files the task failed to process.

" @@ -2115,7 +2321,9 @@ "type":"string", "enum":[ "EXPORT_TO_REPOSITORY", - "IMPORT_METADATA_FROM_REPOSITORY" + "IMPORT_METADATA_FROM_REPOSITORY", + "RELEASE_DATA_FROM_FILESYSTEM", + "AUTO_RELEASE_DATA" ] }, "DataRepositoryTasks":{ @@ -2156,10 +2364,7 @@ "DeleteDataInFileSystem":{"type":"boolean"}, "DeleteDataRepositoryAssociationRequest":{ "type":"structure", - "required":[ - "AssociationId", - "DeleteDataInFileSystem" - ], + "required":["AssociationId"], "members":{ "AssociationId":{ "shape":"DataRepositoryAssociationId", @@ -2192,6 +2397,33 @@ } } }, + "DeleteFileCacheRequest":{ + "type":"structure", + "required":["FileCacheId"], + "members":{ + "FileCacheId":{ + "shape":"FileCacheId", + "documentation":"

The ID of the cache that's being deleted.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + } + } + }, + "DeleteFileCacheResponse":{ + "type":"structure", + "members":{ + "FileCacheId":{ + "shape":"FileCacheId", + "documentation":"

The ID of the cache that's being deleted.

" + }, + "Lifecycle":{ + "shape":"FileCacheLifecycle", + "documentation":"

The cache lifecycle for the deletion request. If the DeleteFileCache operation is successful, this status is DELETING.
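A minimal sketch of the corresponding delete_file_cache call (the cache ID is a placeholder); per the description above, a successful call reports a DELETING lifecycle:

    import boto3

    fsx = boto3.client("fsx")

    resp = fsx.delete_file_cache(FileCacheId="fc-0123456789abcdef0")  # placeholder ID
    print(resp["FileCacheId"], resp["Lifecycle"])  # expected lifecycle: DELETING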

" + } + } + }, "DeleteFileSystemLustreConfiguration":{ "type":"structure", "members":{ @@ -2512,7 +2744,7 @@ "members":{ "Associations":{ "shape":"DataRepositoryAssociations", - "documentation":"

An array of one ore more data repository association descriptions.

" + "documentation":"

An array of one or more data repository association descriptions.

" }, "NextToken":{"shape":"NextToken"} } @@ -2542,6 +2774,27 @@ "NextToken":{"shape":"NextToken"} } }, + "DescribeFileCachesRequest":{ + "type":"structure", + "members":{ + "FileCacheIds":{ + "shape":"FileCacheIds", + "documentation":"

IDs of the caches whose descriptions you want to retrieve (String).

" + }, + "MaxResults":{"shape":"MaxResults"}, + "NextToken":{"shape":"NextToken"} + } + }, + "DescribeFileCachesResponse":{ + "type":"structure", + "members":{ + "FileCaches":{ + "shape":"FileCaches", + "documentation":"

The response object for the DescribeFileCaches operation.
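A hedged sketch of polling DescribeFileCaches until a newly created cache leaves the CREATING state; the ID is a placeholder and the sleep interval is arbitrary:

    import time
    import boto3

    fsx = boto3.client("fsx")
    cache_id = "fc-0123456789abcdef0"  # placeholder ID

    while True:
        caches = fsx.describe_file_caches(FileCacheIds=[cache_id])["FileCaches"]
        lifecycle = caches[0]["Lifecycle"]
        if lifecycle != "CREATING":
            break
        time.sleep(30)
    print(cache_id, lifecycle)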

" + }, + "NextToken":{"shape":"NextToken"} + } + }, "DescribeFileSystemAliasesRequest":{ "type":"structure", "required":["FileSystemId"], @@ -2792,6 +3045,246 @@ "max":3 }, "FailedCount":{"type":"long"}, + "FileCache":{ + "type":"structure", + "members":{ + "OwnerId":{"shape":"AWSAccountId"}, + "CreationTime":{"shape":"CreationTime"}, + "FileCacheId":{ + "shape":"FileCacheId", + "documentation":"

The system-generated, unique ID of the cache.

" + }, + "FileCacheType":{ + "shape":"FileCacheType", + "documentation":"

The type of cache, which must be LUSTRE.

" + }, + "FileCacheTypeVersion":{ + "shape":"FileSystemTypeVersion", + "documentation":"

The Lustre version of the cache, which must be 2.12.

" + }, + "Lifecycle":{ + "shape":"FileCacheLifecycle", + "documentation":"

The lifecycle status of the cache. The following are the possible values and what they mean:

" + }, + "FailureDetails":{ + "shape":"FileCacheFailureDetails", + "documentation":"

A structure providing details of any failures that occurred.

" + }, + "StorageCapacity":{ + "shape":"StorageCapacity", + "documentation":"

The storage capacity of the cache in gibibytes (GiB).

" + }, + "VpcId":{"shape":"VpcId"}, + "SubnetIds":{"shape":"SubnetIds"}, + "NetworkInterfaceIds":{"shape":"NetworkInterfaceIds"}, + "DNSName":{ + "shape":"DNSName", + "documentation":"

The Domain Name System (DNS) name for the cache.

" + }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

Specifies the ID of the Key Management Service (KMS) key to use for encrypting data on an Amazon File Cache. If a KmsKeyId isn't specified, the Amazon FSx-managed KMS key for your account is used. For more information, see Encrypt in the Key Management Service API Reference.

" + }, + "ResourceARN":{"shape":"ResourceARN"}, + "LustreConfiguration":{ + "shape":"FileCacheLustreConfiguration", + "documentation":"

The configuration for the Amazon File Cache resource.

" + }, + "DataRepositoryAssociationIds":{ + "shape":"DataRepositoryAssociationIds", + "documentation":"

A list of IDs of data repository associations that are associated with this cache.

" + } + }, + "documentation":"

A description of a specific Amazon File Cache resource, which is a response object from the DescribeFileCaches operation.

" + }, + "FileCacheCreating":{ + "type":"structure", + "members":{ + "OwnerId":{"shape":"AWSAccountId"}, + "CreationTime":{"shape":"CreationTime"}, + "FileCacheId":{ + "shape":"FileCacheId", + "documentation":"

The system-generated, unique ID of the cache.

" + }, + "FileCacheType":{ + "shape":"FileCacheType", + "documentation":"

The type of cache, which must be LUSTRE.

" + }, + "FileCacheTypeVersion":{ + "shape":"FileSystemTypeVersion", + "documentation":"

The Lustre version of the cache, which must be 2.12.

" + }, + "Lifecycle":{ + "shape":"FileCacheLifecycle", + "documentation":"

The lifecycle status of the cache. The following are the possible values and what they mean:

" + }, + "FailureDetails":{ + "shape":"FileCacheFailureDetails", + "documentation":"

A structure providing details of any failures that occurred.

" + }, + "StorageCapacity":{ + "shape":"StorageCapacity", + "documentation":"

The storage capacity of the cache in gibibytes (GiB).

" + }, + "VpcId":{"shape":"VpcId"}, + "SubnetIds":{"shape":"SubnetIds"}, + "NetworkInterfaceIds":{"shape":"NetworkInterfaceIds"}, + "DNSName":{ + "shape":"DNSName", + "documentation":"

The Domain Name System (DNS) name for the cache.

" + }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

Specifies the ID of the Key Management Service (KMS) key to use for encrypting data on an Amazon File Cache. If a KmsKeyId isn't specified, the Amazon FSx-managed KMS key for your account is used. For more information, see Encrypt in the Key Management Service API Reference.

" + }, + "ResourceARN":{"shape":"ResourceARN"}, + "Tags":{"shape":"Tags"}, + "CopyTagsToDataRepositoryAssociations":{ + "shape":"CopyTagsToDataRepositoryAssociations", + "documentation":"

A boolean flag indicating whether tags for the cache should be copied to data repository associations.

" + }, + "LustreConfiguration":{ + "shape":"FileCacheLustreConfiguration", + "documentation":"

The configuration for the Amazon File Cache resource.

" + }, + "DataRepositoryAssociationIds":{ + "shape":"DataRepositoryAssociationIds", + "documentation":"

A list of IDs of data repository associations that are associated with this cache.

" + } + }, + "documentation":"

The response object for the Amazon File Cache resource being created in the CreateFileCache operation.

" + }, + "FileCacheDataRepositoryAssociation":{ + "type":"structure", + "required":[ + "FileCachePath", + "DataRepositoryPath" + ], + "members":{ + "FileCachePath":{ + "shape":"Namespace", + "documentation":"

A path on the cache that points to a high-level directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) that will be mapped 1-1 with DataRepositoryPath. The leading forward slash in the name is required. Two data repository associations cannot have overlapping cache paths. For example, if a data repository is associated with cache path /ns1/, then you cannot link another data repository with cache path /ns1/ns2.

This path specifies where in your cache files will be exported from. This cache directory can be linked to only one data repository, and no other data repository can be linked to the directory.

The cache path can only be set to root (/) on an NFS DRA when DataRepositorySubdirectories is specified. If you specify root (/) as the cache path, you can create only one DRA on the cache.

The cache path cannot be set to root (/) for an S3 DRA.

" + }, + "DataRepositoryPath":{ + "shape":"ArchivePath", + "documentation":"

The path to the S3 or NFS data repository that links to the cache. You must provide one of the following paths:

" + }, + "DataRepositorySubdirectories":{ + "shape":"SubDirectoriesPaths", + "documentation":"

A list of NFS Exports that will be linked with this data repository association. The Export paths are in the format /exportpath1. To use this parameter, you must configure DataRepositoryPath as the domain name of the NFS file system. The NFS file system domain name in effect is the root of the subdirectories. Note that DataRepositorySubdirectories is not supported for S3 data repositories.

" + }, + "NFS":{ + "shape":"FileCacheNFSConfiguration", + "documentation":"

The configuration for a data repository association that links an Amazon File Cache resource to an NFS data repository.

" + } + }, + "documentation":"

The configuration for a data repository association (DRA) to be created during the Amazon File Cache resource creation. The DRA links the cache to either an Amazon S3 bucket or prefix, or a Network File System (NFS) data repository that supports the NFSv3 protocol.

The DRA does not support automatic import or automatic export.
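For comparison with the S3 case, here is a sketch of an NFS-backed entry for the DataRepositoryAssociations list passed to CreateFileCache. The nfs:// domain-name form of DataRepositoryPath and the IP addresses are assumptions/placeholders not spelled out in this model; only the member names and the NFS3 version come from the shapes above.

    # Hypothetical NFS DRA entry for CreateFileCache's DataRepositoryAssociations list.
    nfs_dra = {
        "FileCachePath": "/",                              # root is allowed only with DataRepositorySubdirectories
        "DataRepositoryPath": "nfs://filer.example.com",   # assumed format; domain name of the NFS file system
        "DataRepositorySubdirectories": ["/export1", "/export2"],
        "NFS": {
            "Version": "NFS3",                             # only supported value
            "DnsIps": ["10.0.0.2", "10.0.0.3"],            # placeholder DNS resolver IPs
        },
    }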

" + }, + "FileCacheFailureDetails":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"ErrorMessage", + "documentation":"

A message describing any failures that occurred.

" + } + }, + "documentation":"

A structure providing details of any failures that occurred.

" + }, + "FileCacheId":{ + "type":"string", + "max":21, + "min":11, + "pattern":"^(fc-[0-9a-f]{8,})$" + }, + "FileCacheIds":{ + "type":"list", + "member":{"shape":"FileCacheId"}, + "max":50 + }, + "FileCacheLifecycle":{ + "type":"string", + "enum":[ + "AVAILABLE", + "CREATING", + "DELETING", + "UPDATING", + "FAILED" + ] + }, + "FileCacheLustreConfiguration":{ + "type":"structure", + "members":{ + "PerUnitStorageThroughput":{ + "shape":"PerUnitStorageThroughput", + "documentation":"

Per unit storage throughput represents the megabytes per second of read or write throughput per 1 tebibyte of storage provisioned. Cache throughput capacity is equal to Storage capacity (TiB) * PerUnitStorageThroughput (MB/s/TiB). The only supported value is 1000.

" + }, + "DeploymentType":{ + "shape":"FileCacheLustreDeploymentType", + "documentation":"

The deployment type of the Amazon File Cache resource, which must be CACHE_1.

" + }, + "MountName":{ + "shape":"LustreFileSystemMountName", + "documentation":"

You use the MountName value when mounting the cache. If you pass a cache ID to the DescribeFileCaches operation, it returns the MountName value as part of the cache's description.

" + }, + "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"}, + "MetadataConfiguration":{ + "shape":"FileCacheLustreMetadataConfiguration", + "documentation":"

The configuration for a Lustre MDT (Metadata Target) storage volume.

" + }, + "LogConfiguration":{ + "shape":"LustreLogConfiguration", + "documentation":"

The configuration for Lustre logging used to write the enabled logging events for your Amazon File Cache resource to Amazon CloudWatch Logs.

" + } + }, + "documentation":"

The configuration for the Amazon File Cache resource.

" + }, + "FileCacheLustreDeploymentType":{ + "type":"string", + "enum":["CACHE_1"] + }, + "FileCacheLustreMetadataConfiguration":{ + "type":"structure", + "required":["StorageCapacity"], + "members":{ + "StorageCapacity":{ + "shape":"MetadataStorageCapacity", + "documentation":"

The storage capacity of the Lustre MDT (Metadata Target) storage volume in gibibytes (GiB). The only supported value is 2400 GiB.

" + } + }, + "documentation":"

The configuration for a Lustre MDT (Metadata Target) storage volume. The metadata on Amazon File Cache is managed by a Lustre Metadata Server (MDS) while the actual metadata is persisted on an MDT.

" + }, + "FileCacheNFSConfiguration":{ + "type":"structure", + "required":["Version"], + "members":{ + "Version":{ + "shape":"NfsVersion", + "documentation":"

The version of the NFS (Network File System) protocol of the NFS data repository. The only supported value is NFS3, which indicates that the data repository must support the NFSv3 protocol.

" + }, + "DnsIps":{ + "shape":"RepositoryDnsIps", + "documentation":"

A list of up to 2 IP addresses of DNS servers used to resolve the NFS file system domain name. The provided IP addresses can either be the IP addresses of a DNS forwarder or resolver that the customer manages and runs inside the customer VPC, or the IP addresses of the on-premises DNS servers.

" + } + }, + "documentation":"

The configuration for an NFS data repository association (DRA) created during the creation of the Amazon File Cache resource.

" + }, + "FileCacheNotFound":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

No caches were found based upon supplied parameters.

", + "exception":true + }, + "FileCacheType":{ + "type":"string", + "enum":["LUSTRE"] + }, + "FileCaches":{ + "type":"list", + "member":{"shape":"FileCache"}, + "max":50 + }, "FileSystem":{ "type":"structure", "members":{ @@ -3009,7 +3502,9 @@ "backup-type", "file-system-type", "volume-id", - "data-repository-type" + "data-repository-type", + "file-cache-id", + "file-cache-type" ] }, "FilterValue":{ @@ -3321,14 +3816,14 @@ "members":{ "Level":{ "shape":"LustreAccessAuditLogLevel", - "documentation":"

The data repository events that are logged by Amazon FSx.

" + "documentation":"

The data repository events that are logged by Amazon FSx.

Note that Amazon File Cache uses a default setting of WARN_ERROR, which can't be changed.

" }, "Destination":{ "shape":"GeneralARN", "documentation":"

The Amazon Resource Name (ARN) that specifies the destination of the logs. The destination can be any Amazon CloudWatch Logs log group ARN. The destination ARN must be in the same Amazon Web Services partition, Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system.

" } }, - "documentation":"

The configuration for Lustre logging used to write the enabled logging events for your file system to Amazon CloudWatch Logs.

When logging is enabled, Lustre logs error and warning events from data repository operations such as automatic export and data repository tasks. To learn more about Lustre logging, see Logging with Amazon CloudWatch Logs.

" + "documentation":"

The configuration for Lustre logging used to write the enabled logging events for your Amazon FSx for Lustre file system or Amazon File Cache resource to Amazon CloudWatch Logs.

" }, "LustreLogCreateConfiguration":{ "type":"structure", @@ -3340,10 +3835,10 @@ }, "Destination":{ "shape":"GeneralARN", - "documentation":"

The Amazon Resource Name (ARN) that specifies the destination of the logs.

The destination can be any Amazon CloudWatch Logs log group ARN, with the following requirements:

" + "documentation":"

The Amazon Resource Name (ARN) that specifies the destination of the logs.

The destination can be any Amazon CloudWatch Logs log group ARN, with the following requirements:

" } }, - "documentation":"

The Lustre logging configuration used when creating or updating an Amazon FSx for Lustre file system. Lustre logging writes the enabled logging events for your file system to Amazon CloudWatch Logs.

Error and warning events can be logged from the following data repository operations:

To learn more about Lustre logging, see Logging to Amazon CloudWatch Logs.

" + "documentation":"

The Lustre logging configuration used when creating or updating an Amazon FSx for Lustre file system. An Amazon File Cache is created with Lustre logging enabled by default, with a setting of WARN_ERROR for the logging events, which can't be changed.

Lustre logging writes the enabled logging events for your file system or cache to Amazon CloudWatch Logs.

" }, "LustreNoSquashNid":{ "type":"string", @@ -3393,6 +3888,19 @@ "max":4096, "min":8 }, + "MetadataStorageCapacity":{ + "type":"integer", + "max":2147483647, + "min":0 + }, + "MissingFileCacheConfiguration":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

A cache configuration is required for this operation.

", + "exception":true + }, "MissingFileSystemConfiguration":{ "type":"structure", "members":{ @@ -3409,6 +3917,25 @@ "documentation":"

A volume configuration is required for this operation.

", "exception":true }, + "NFSDataRepositoryConfiguration":{ + "type":"structure", + "required":["Version"], + "members":{ + "Version":{ + "shape":"NfsVersion", + "documentation":"

The version of the NFS (Network File System) protocol of the NFS data repository. Currently, the only supported value is NFS3, which indicates that the data repository must support the NFSv3 protocol.

" + }, + "DnsIps":{ + "shape":"RepositoryDnsIps", + "documentation":"

A list of up to 2 IP addresses of DNS servers used to resolve the NFS file system domain name. The provided IP addresses can either be the IP addresses of a DNS forwarder or resolver that the customer manages and runs inside the customer VPC, or the IP addresses of the on-premises DNS servers.

" + }, + "AutoExportPolicy":{ + "shape":"AutoExportPolicy", + "documentation":"

This parameter is not supported for Amazon File Cache.

" + } + }, + "documentation":"

The configuration for a data repository association that links an Amazon File Cache resource to an NFS data repository.

" + }, "Namespace":{ "type":"string", "max":4096, @@ -3441,6 +3968,10 @@ "min":1, "pattern":"^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=)?$" }, + "NfsVersion":{ + "type":"string", + "enum":["NFS3"] + }, "NotServiceResourceError":{ "type":"structure", "required":["ResourceARN"], @@ -3830,6 +4361,7 @@ "FileSystem":{"shape":"FileSystem"} } }, + "ReleasedCapacity":{"type":"long"}, "ReportFormat":{ "type":"string", "enum":["REPORT_CSV_20191124"] @@ -3838,6 +4370,11 @@ "type":"string", "enum":["FAILED_FILES_ONLY"] }, + "RepositoryDnsIps":{ + "type":"list", + "member":{"shape":"IpAddress"}, + "max":10 + }, "RequestTime":{"type":"timestamp"}, "ResourceARN":{ "type":"string", @@ -3952,7 +4489,7 @@ "documentation":"

Specifies the type of updated objects (new, changed, deleted) that will be automatically exported from your file system to the linked S3 bucket.

" } }, - "documentation":"

The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration consists of an AutoImportPolicy that defines file events on the data repository are automatically imported to the file system and an AutoExportPolicy that defines which file events on the file system are automatically exported to the data repository. File events are when files or directories are added, changed, or deleted on the file system or the data repository.

" + "documentation":"

The configuration for an Amazon S3 data repository linked to an Amazon FSx for Lustre file system with a data repository association. The configuration consists of an AutoImportPolicy that defines which file events on the data repository are automatically imported to the file system and an AutoExportPolicy that defines which file events on the file system are automatically exported to the data repository. File events are when files or directories are added, changed, or deleted on the file system or the data repository.

Data repository associations on Amazon File Cache don't use S3DataRepositoryConfiguration because they don't support automatic import or automatic export.

" }, "SecurityGroupId":{ "type":"string", @@ -3964,7 +4501,7 @@ "SecurityGroupIds":{ "type":"list", "member":{"shape":"SecurityGroupId"}, - "documentation":"

A list of security group IDs.

", + "documentation":"

A list of IDs specifying the security groups to apply to all network interfaces created for file system access. This list isn't returned in later requests to describe the file system.

", "max":50 }, "SecurityStyle":{ @@ -4067,7 +4604,8 @@ "TOTAL_IN_PROGRESS_COPY_BACKUPS", "STORAGE_VIRTUAL_MACHINES_PER_FILE_SYSTEM", "VOLUMES_PER_FILE_SYSTEM", - "TOTAL_SSD_IOPS" + "TOTAL_SSD_IOPS", + "FILE_CACHE_COUNT" ] }, "ServiceLimitExceeded":{ @@ -4367,6 +4905,11 @@ "member":{"shape":"StorageVirtualMachine"}, "max":50 }, + "SubDirectoriesPaths":{ + "type":"list", + "member":{"shape":"Namespace"}, + "max":500 + }, "SubnetId":{ "type":"string", "documentation":"

The ID for a subnet. A subnet is a range of IP addresses in your virtual private cloud (VPC). For more information, see VPC and subnets in the Amazon VPC User Guide.

", @@ -4377,7 +4920,7 @@ "SubnetIds":{ "type":"list", "member":{"shape":"SubnetId"}, - "documentation":"

A list of subnet IDs. Currently, you can specify only one subnet ID in a call to the CreateFileSystem operation.

", + "documentation":"

A list of subnet IDs that the cache will be accessible from. You can specify only one subnet ID in a call to the CreateFileCache operation.

", "max":50 }, "SucceededCount":{"type":"long"}, @@ -4598,6 +5141,40 @@ } } }, + "UpdateFileCacheLustreConfiguration":{ + "type":"structure", + "members":{ + "WeeklyMaintenanceStartTime":{"shape":"WeeklyTime"} + }, + "documentation":"

The configuration update for an Amazon File Cache resource.

" + }, + "UpdateFileCacheRequest":{ + "type":"structure", + "required":["FileCacheId"], + "members":{ + "FileCacheId":{ + "shape":"FileCacheId", + "documentation":"

The ID of the cache that you are updating.

" + }, + "ClientRequestToken":{ + "shape":"ClientRequestToken", + "idempotencyToken":true + }, + "LustreConfiguration":{ + "shape":"UpdateFileCacheLustreConfiguration", + "documentation":"

The configuration updates for an Amazon File Cache resource.

" + } + } + }, + "UpdateFileCacheResponse":{ + "type":"structure", + "members":{ + "FileCache":{ + "shape":"FileCache", + "documentation":"

A description of the cache that was updated.
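A minimal sketch of the matching update_file_cache call; the maintenance window string follows the existing FSx weekly-time convention (day:HH:MM), which is an assumption since the WeeklyTime shape is not part of this hunk:

    import boto3

    fsx = boto3.client("fsx")

    resp = fsx.update_file_cache(
        FileCacheId="fc-0123456789abcdef0",                             # placeholder ID
        LustreConfiguration={"WeeklyMaintenanceStartTime": "1:05:00"},  # assumed d:HH:MM format
    )
    print(resp["FileCache"]["Lifecycle"])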

" + } + } + }, "UpdateFileSystemLustreConfiguration":{ "type":"structure", "members":{ diff --git a/botocore/data/migrationhuborchestrator/2021-08-28/paginators-1.json b/botocore/data/migrationhuborchestrator/2021-08-28/paginators-1.json new file mode 100644 index 0000000000..4c452221fb --- /dev/null +++ b/botocore/data/migrationhuborchestrator/2021-08-28/paginators-1.json @@ -0,0 +1,46 @@ +{ + "pagination": { + "ListPlugins": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "plugins" + }, + "ListTemplateStepGroups": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "templateStepGroupSummary" + }, + "ListTemplateSteps": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "templateStepSummaryList" + }, + "ListTemplates": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "templateSummary" + }, + "ListWorkflowStepGroups": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "workflowStepGroupsSummary" + }, + "ListWorkflowSteps": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "workflowStepsSummary" + }, + "ListWorkflows": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "migrationWorkflowSummary" + } + } +} diff --git a/botocore/data/migrationhuborchestrator/2021-08-28/service-2.json b/botocore/data/migrationhuborchestrator/2021-08-28/service-2.json new file mode 100644 index 0000000000..89f3165ba5 --- /dev/null +++ b/botocore/data/migrationhuborchestrator/2021-08-28/service-2.json @@ -0,0 +1,2910 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2021-08-28", + "endpointPrefix":"migrationhub-orchestrator", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"AWS Migration Hub Orchestrator", + "serviceId":"MigrationHubOrchestrator", + "signatureVersion":"v4", + "signingName":"migrationhub-orchestrator", + "uid":"migrationhuborchestrator-2021-08-28" + }, + "operations":{ + "CreateWorkflow":{ + "name":"CreateWorkflow", + "http":{ + "method":"POST", + "requestUri":"/migrationworkflow/", + "responseCode":200 + }, + "input":{"shape":"CreateMigrationWorkflowRequest"}, + "output":{"shape":"CreateMigrationWorkflowResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Create a workflow to orchestrate your migrations.

" + }, + "CreateWorkflowStep":{ + "name":"CreateWorkflowStep", + "http":{ + "method":"POST", + "requestUri":"/workflowstep", + "responseCode":200 + }, + "input":{"shape":"CreateWorkflowStepRequest"}, + "output":{"shape":"CreateWorkflowStepResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Create a step in the migration workflow.

" + }, + "CreateWorkflowStepGroup":{ + "name":"CreateWorkflowStepGroup", + "http":{ + "method":"POST", + "requestUri":"/workflowstepgroups", + "responseCode":200 + }, + "input":{"shape":"CreateWorkflowStepGroupRequest"}, + "output":{"shape":"CreateWorkflowStepGroupResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Create a step group in a migration workflow.

" + }, + "DeleteWorkflow":{ + "name":"DeleteWorkflow", + "http":{ + "method":"DELETE", + "requestUri":"/migrationworkflow/{id}", + "responseCode":202 + }, + "input":{"shape":"DeleteMigrationWorkflowRequest"}, + "output":{"shape":"DeleteMigrationWorkflowResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Delete a migration workflow. You must pause a running workflow in the Migration Hub Orchestrator console to delete it.

", + "idempotent":true + }, + "DeleteWorkflowStep":{ + "name":"DeleteWorkflowStep", + "http":{ + "method":"DELETE", + "requestUri":"/workflowstep/{id}", + "responseCode":200 + }, + "input":{"shape":"DeleteWorkflowStepRequest"}, + "output":{"shape":"DeleteWorkflowStepResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Delete a step in a migration workflow. Pause the workflow to delete a running step.

", + "idempotent":true + }, + "DeleteWorkflowStepGroup":{ + "name":"DeleteWorkflowStepGroup", + "http":{ + "method":"DELETE", + "requestUri":"/workflowstepgroup/{id}", + "responseCode":202 + }, + "input":{"shape":"DeleteWorkflowStepGroupRequest"}, + "output":{"shape":"DeleteWorkflowStepGroupResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Delete a step group in a migration workflow.

", + "idempotent":true + }, + "GetTemplate":{ + "name":"GetTemplate", + "http":{ + "method":"GET", + "requestUri":"/migrationworkflowtemplate/{id}", + "responseCode":200 + }, + "input":{"shape":"GetMigrationWorkflowTemplateRequest"}, + "output":{"shape":"GetMigrationWorkflowTemplateResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Get the template you want to use for creating a migration workflow.

" + }, + "GetTemplateStep":{ + "name":"GetTemplateStep", + "http":{ + "method":"GET", + "requestUri":"/templatestep/{id}", + "responseCode":200 + }, + "input":{"shape":"GetTemplateStepRequest"}, + "output":{"shape":"GetTemplateStepResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Get a specific step in a template.

" + }, + "GetTemplateStepGroup":{ + "name":"GetTemplateStepGroup", + "http":{ + "method":"GET", + "requestUri":"/templates/{templateId}/stepgroups/{id}", + "responseCode":200 + }, + "input":{"shape":"GetTemplateStepGroupRequest"}, + "output":{"shape":"GetTemplateStepGroupResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Get a step group in a template.

" + }, + "GetWorkflow":{ + "name":"GetWorkflow", + "http":{ + "method":"GET", + "requestUri":"/migrationworkflow/{id}", + "responseCode":200 + }, + "input":{"shape":"GetMigrationWorkflowRequest"}, + "output":{"shape":"GetMigrationWorkflowResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Get migration workflow.

" + }, + "GetWorkflowStep":{ + "name":"GetWorkflowStep", + "http":{ + "method":"GET", + "requestUri":"/workflowstep/{id}", + "responseCode":200 + }, + "input":{"shape":"GetWorkflowStepRequest"}, + "output":{"shape":"GetWorkflowStepResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Get a step in the migration workflow.

" + }, + "GetWorkflowStepGroup":{ + "name":"GetWorkflowStepGroup", + "http":{ + "method":"GET", + "requestUri":"/workflowstepgroup/{id}", + "responseCode":200 + }, + "input":{"shape":"GetWorkflowStepGroupRequest"}, + "output":{"shape":"GetWorkflowStepGroupResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Get the step group of a migration workflow.

" + }, + "ListPlugins":{ + "name":"ListPlugins", + "http":{ + "method":"GET", + "requestUri":"/plugins", + "responseCode":200 + }, + "input":{"shape":"ListPluginsRequest"}, + "output":{"shape":"ListPluginsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

List AWS Migration Hub Orchestrator plugins.
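Because the paginators file added earlier in this patch registers ListPlugins (result key plugins), the operation can be consumed through the standard botocore paginator interface. A hedged sketch:

    import boto3

    orchestrator = boto3.client("migrationhuborchestrator")

    paginator = orchestrator.get_paginator("list_plugins")
    for page in paginator.paginate():
        for plugin in page.get("plugins", []):
            print(plugin)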

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

List the tags added to a resource.

" + }, + "ListTemplateStepGroups":{ + "name":"ListTemplateStepGroups", + "http":{ + "method":"GET", + "requestUri":"/templatestepgroups/{templateId}", + "responseCode":200 + }, + "input":{"shape":"ListTemplateStepGroupsRequest"}, + "output":{"shape":"ListTemplateStepGroupsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

List the step groups in a template.

" + }, + "ListTemplateSteps":{ + "name":"ListTemplateSteps", + "http":{ + "method":"GET", + "requestUri":"/templatesteps", + "responseCode":200 + }, + "input":{"shape":"ListTemplateStepsRequest"}, + "output":{"shape":"ListTemplateStepsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

List the steps in a template.

" + }, + "ListTemplates":{ + "name":"ListTemplates", + "http":{ + "method":"GET", + "requestUri":"/migrationworkflowtemplates", + "responseCode":200 + }, + "input":{"shape":"ListMigrationWorkflowTemplatesRequest"}, + "output":{"shape":"ListMigrationWorkflowTemplatesResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

List the templates available in Migration Hub Orchestrator to create a migration workflow.

" + }, + "ListWorkflowStepGroups":{ + "name":"ListWorkflowStepGroups", + "http":{ + "method":"GET", + "requestUri":"/workflowstepgroups", + "responseCode":200 + }, + "input":{"shape":"ListWorkflowStepGroupsRequest"}, + "output":{"shape":"ListWorkflowStepGroupsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

List the step groups in a migration workflow.

" + }, + "ListWorkflowSteps":{ + "name":"ListWorkflowSteps", + "http":{ + "method":"GET", + "requestUri":"/workflow/{workflowId}/workflowstepgroups/{stepGroupId}/workflowsteps", + "responseCode":200 + }, + "input":{"shape":"ListWorkflowStepsRequest"}, + "output":{"shape":"ListWorkflowStepsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

List the steps in a workflow.
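The same paginator pattern applies here, except that ListWorkflowSteps takes the workflow and step-group IDs from its request URI as parameters; the IDs below are placeholders. A sketch:

    import boto3

    orchestrator = boto3.client("migrationhuborchestrator")

    paginator = orchestrator.get_paginator("list_workflow_steps")
    pages = paginator.paginate(
        workflowId="workflow-id-placeholder",
        stepGroupId="step-group-id-placeholder",
    )
    for page in pages:
        for step in page.get("workflowStepsSummary", []):
            print(step)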

" + }, + "ListWorkflows":{ + "name":"ListWorkflows", + "http":{ + "method":"GET", + "requestUri":"/migrationworkflows", + "responseCode":200 + }, + "input":{"shape":"ListMigrationWorkflowsRequest"}, + "output":{"shape":"ListMigrationWorkflowsResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

List the migration workflows.

" + }, + "RetryWorkflowStep":{ + "name":"RetryWorkflowStep", + "http":{ + "method":"POST", + "requestUri":"/retryworkflowstep/{id}", + "responseCode":200 + }, + "input":{"shape":"RetryWorkflowStepRequest"}, + "output":{"shape":"RetryWorkflowStepResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retry a failed step in a migration workflow.

" + }, + "StartWorkflow":{ + "name":"StartWorkflow", + "http":{ + "method":"POST", + "requestUri":"/migrationworkflow/{id}/start", + "responseCode":200 + }, + "input":{"shape":"StartMigrationWorkflowRequest"}, + "output":{"shape":"StartMigrationWorkflowResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Start a migration workflow.

" + }, + "StopWorkflow":{ + "name":"StopWorkflow", + "http":{ + "method":"POST", + "requestUri":"/migrationworkflow/{id}/stop", + "responseCode":200 + }, + "input":{"shape":"StopMigrationWorkflowRequest"}, + "output":{"shape":"StopMigrationWorkflowResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Stop an ongoing migration workflow.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Tag a resource by specifying its Amazon Resource Name (ARN).

", + "idempotent":true + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes the tags for a resource.

", + "idempotent":true + }, + "UpdateWorkflow":{ + "name":"UpdateWorkflow", + "http":{ + "method":"POST", + "requestUri":"/migrationworkflow/{id}", + "responseCode":200 + }, + "input":{"shape":"UpdateMigrationWorkflowRequest"}, + "output":{"shape":"UpdateMigrationWorkflowResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Update a migration workflow.

" + }, + "UpdateWorkflowStep":{ + "name":"UpdateWorkflowStep", + "http":{ + "method":"POST", + "requestUri":"/workflowstep/{id}", + "responseCode":200 + }, + "input":{"shape":"UpdateWorkflowStepRequest"}, + "output":{"shape":"UpdateWorkflowStepResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Update a step in a migration workflow.

" + }, + "UpdateWorkflowStepGroup":{ + "name":"UpdateWorkflowStepGroup", + "http":{ + "method":"POST", + "requestUri":"/workflowstepgroup/{id}", + "responseCode":202 + }, + "input":{"shape":"UpdateWorkflowStepGroupRequest"}, + "output":{"shape":"UpdateWorkflowStepGroupResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Update the step group in a migration workflow.

", + "idempotent":true + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

You do not have sufficient access to perform this action.

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":false} + }, + "ApplicationConfigurationName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[-a-zA-Z0-9_.+]+[-a-zA-Z0-9_.+ ]*" + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "CreateMigrationWorkflowRequest":{ + "type":"structure", + "required":[ + "name", + "templateId", + "applicationConfigurationId", + "inputParameters" + ], + "members":{ + "name":{ + "shape":"CreateMigrationWorkflowRequestNameString", + "documentation":"

The name of the migration workflow.

" + }, + "description":{ + "shape":"CreateMigrationWorkflowRequestDescriptionString", + "documentation":"

The description of the migration workflow.

" + }, + "templateId":{ + "shape":"CreateMigrationWorkflowRequestTemplateIdString", + "documentation":"

The ID of the template.

" + }, + "applicationConfigurationId":{ + "shape":"CreateMigrationWorkflowRequestApplicationConfigurationIdString", + "documentation":"

The configuration ID of the application configured in Application Discovery Service.

" + }, + "inputParameters":{ + "shape":"StepInputParameters", + "documentation":"

The input parameters required to create a migration workflow.

" + }, + "stepTargets":{ + "shape":"StringList", + "documentation":"

The servers on which a step will be run.

" + }, + "tags":{ + "shape":"StringMap", + "documentation":"

The tags to add to a migration workflow.

" + } + } + }, + "CreateMigrationWorkflowRequestApplicationConfigurationIdString":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[-a-zA-Z0-9_.+]+[-a-zA-Z0-9_.+ ]*" + }, + "CreateMigrationWorkflowRequestDescriptionString":{ + "type":"string", + "max":500, + "min":0, + "pattern":"[-a-zA-Z0-9_.+, ]*" + }, + "CreateMigrationWorkflowRequestNameString":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[-a-zA-Z0-9_.+]+[-a-zA-Z0-9_.+ ]*" + }, + "CreateMigrationWorkflowRequestTemplateIdString":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[-a-zA-Z0-9_.+]+[-a-zA-Z0-9_.+ ]*" + }, + "CreateMigrationWorkflowResponse":{ + "type":"structure", + "members":{ + "id":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow.

" + }, + "arn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the migration workflow.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the migration workflow.

" + }, + "description":{ + "shape":"String", + "documentation":"

The description of the migration workflow.

" + }, + "templateId":{ + "shape":"String", + "documentation":"

The ID of the template.

" + }, + "adsApplicationConfigurationId":{ + "shape":"String", + "documentation":"

The configuration ID of the application configured in Application Discovery Service.

" + }, + "workflowInputs":{ + "shape":"StepInputParameters", + "documentation":"

The inputs for creating a migration workflow.

" + }, + "stepTargets":{ + "shape":"StringList", + "documentation":"

The servers on which a step will be run.

" + }, + "status":{ + "shape":"MigrationWorkflowStatusEnum", + "documentation":"

The status of the migration workflow.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the migration workflow was created.

" + }, + "tags":{ + "shape":"StringMap", + "documentation":"

The tags to add on a migration workflow.

" + } + } + }, + "CreateWorkflowStepGroupRequest":{ + "type":"structure", + "required":[ + "workflowId", + "name" + ], + "members":{ + "workflowId":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow that will contain the step group.

" + }, + "name":{ + "shape":"StepGroupName", + "documentation":"

The name of the step group.

" + }, + "description":{ + "shape":"StepGroupDescription", + "documentation":"

The description of the step group.

" + }, + "next":{ + "shape":"StringList", + "documentation":"

The next step group.

" + }, + "previous":{ + "shape":"StringList", + "documentation":"

The previous step group.

" + } + } + }, + "CreateWorkflowStepGroupResponse":{ + "type":"structure", + "members":{ + "workflowId":{ + "shape":"String", + "documentation":"

The ID of the migration workflow that contains the step group.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the step group.

" + }, + "id":{ + "shape":"String", + "documentation":"

The ID of the step group.

" + }, + "description":{ + "shape":"String", + "documentation":"

The description of the step group.

" + }, + "tools":{ + "shape":"ToolsList", + "documentation":"

List of AWS services utilized in a migration workflow.

" + }, + "next":{ + "shape":"StringList", + "documentation":"

The next step group.

" + }, + "previous":{ + "shape":"StringList", + "documentation":"

The previous step group.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the step group is created.

" + } + } + }, + "CreateWorkflowStepRequest":{ + "type":"structure", + "required":[ + "name", + "stepGroupId", + "workflowId", + "stepActionType" + ], + "members":{ + "name":{ + "shape":"MigrationWorkflowName", + "documentation":"

The name of the step.

" + }, + "stepGroupId":{ + "shape":"StepGroupId", + "documentation":"

The ID of the step group.

" + }, + "workflowId":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow.

" + }, + "stepActionType":{ + "shape":"StepActionType", + "documentation":"

The action type of the step. You must run and update the status of a manual step for the workflow to continue after the completion of the step.

" + }, + "description":{ + "shape":"MigrationWorkflowDescription", + "documentation":"

The description of the step.

" + }, + "workflowStepAutomationConfiguration":{ + "shape":"WorkflowStepAutomationConfiguration", + "documentation":"

The custom script to run tests on source or target environments.

" + }, + "stepTarget":{ + "shape":"StringList", + "documentation":"

The servers on which a step will be run.

" + }, + "outputs":{ + "shape":"WorkflowStepOutputList", + "documentation":"

The key value pairs added for the expected output.

" + }, + "previous":{ + "shape":"StringList", + "documentation":"

The previous step.

" + }, + "next":{ + "shape":"StringList", + "documentation":"

The next step.

" + } + } + }, + "CreateWorkflowStepResponse":{ + "type":"structure", + "members":{ + "id":{ + "shape":"String", + "documentation":"

The ID of the step.

" + }, + "stepGroupId":{ + "shape":"String", + "documentation":"

The ID of the step group.

" + }, + "workflowId":{ + "shape":"String", + "documentation":"

The ID of the migration workflow.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the step.

" + } + } + }, + "DataType":{ + "type":"string", + "enum":[ + "STRING", + "INTEGER", + "STRINGLIST", + "STRINGMAP" + ] + }, + "DeleteMigrationWorkflowRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow you want to delete.

", + "location":"uri", + "locationName":"id" + } + } + }, + "DeleteMigrationWorkflowResponse":{ + "type":"structure", + "members":{ + "id":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow.

" + }, + "arn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the migration workflow.

" + }, + "status":{ + "shape":"MigrationWorkflowStatusEnum", + "documentation":"

The status of the migration workflow.

" + } + } + }, + "DeleteWorkflowStepGroupRequest":{ + "type":"structure", + "required":[ + "workflowId", + "id" + ], + "members":{ + "workflowId":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow.

", + "location":"querystring", + "locationName":"workflowId" + }, + "id":{ + "shape":"StepGroupId", + "documentation":"

The ID of the step group you want to delete.

", + "location":"uri", + "locationName":"id" + } + } + }, + "DeleteWorkflowStepGroupResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteWorkflowStepRequest":{ + "type":"structure", + "required":[ + "id", + "stepGroupId", + "workflowId" + ], + "members":{ + "id":{ + "shape":"StepId", + "documentation":"

The ID of the step you want to delete.

", + "location":"uri", + "locationName":"id" + }, + "stepGroupId":{ + "shape":"StepGroupId", + "documentation":"

The ID of the step group that contains the step you want to delete.

", + "location":"querystring", + "locationName":"stepGroupId" + }, + "workflowId":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow.

", + "location":"querystring", + "locationName":"workflowId" + } + } + }, + "DeleteWorkflowStepResponse":{ + "type":"structure", + "members":{ + } + }, + "GetMigrationWorkflowRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow.

", + "location":"uri", + "locationName":"id" + } + } + }, + "GetMigrationWorkflowResponse":{ + "type":"structure", + "members":{ + "id":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow.

" + }, + "arn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the migration workflow.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the migration workflow.

" + }, + "description":{ + "shape":"String", + "documentation":"

The description of the migration workflow.

" + }, + "templateId":{ + "shape":"String", + "documentation":"

The ID of the template.

" + }, + "adsApplicationConfigurationId":{ + "shape":"String", + "documentation":"

The configuration ID of the application configured in Application Discovery Service.

" + }, + "adsApplicationName":{ + "shape":"String", + "documentation":"

The name of the application configured in Application Discovery Service.

" + }, + "status":{ + "shape":"MigrationWorkflowStatusEnum", + "documentation":"

The status of the migration workflow.

" + }, + "statusMessage":{ + "shape":"String", + "documentation":"

The status message of the migration workflow.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the migration workflow was created.

" + }, + "lastStartTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the migration workflow was last started.

" + }, + "lastStopTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the migration workflow was last stopped.

" + }, + "lastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the migration workflow was last modified.

" + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the migration workflow ended.

" + }, + "tools":{ + "shape":"ToolsList", + "documentation":"

List of AWS services utilized in a migration workflow.

" + }, + "totalSteps":{ + "shape":"Integer", + "documentation":"

The total number of steps in the migration workflow.

" + }, + "completedSteps":{ + "shape":"Integer", + "documentation":"

The number of completed steps in the migration workflow.

" + }, + "workflowInputs":{ + "shape":"StepInputParameters", + "documentation":"

The inputs required for creating the migration workflow.

" + }, + "tags":{ + "shape":"StringMap", + "documentation":"

The tags added to the migration workflow.

" + }, + "workflowBucket":{ + "shape":"String", + "documentation":"

The Amazon S3 bucket where the migration logs are stored.

" + } + } + }, + "GetMigrationWorkflowTemplateRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"TemplateId", + "documentation":"

The ID of the template.

", + "location":"uri", + "locationName":"id" + } + } + }, + "GetMigrationWorkflowTemplateResponse":{ + "type":"structure", + "members":{ + "id":{ + "shape":"String", + "documentation":"

The ID of the template.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the template.

" + }, + "description":{ + "shape":"String", + "documentation":"

The description of the template.

" + }, + "inputs":{ + "shape":"TemplateInputList", + "documentation":"

The inputs provided for the creation of the migration workflow.

" + }, + "tools":{ + "shape":"ToolsList", + "documentation":"

List of AWS services utilized in a migration workflow.

" + }, + "status":{ + "shape":"TemplateStatus", + "documentation":"

The status of the template.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the template was last created.

" + } + } + }, + "GetTemplateStepGroupRequest":{ + "type":"structure", + "required":[ + "templateId", + "id" + ], + "members":{ + "templateId":{ + "shape":"TemplateId", + "documentation":"

The ID of the template.

", + "location":"uri", + "locationName":"templateId" + }, + "id":{ + "shape":"StepGroupId", + "documentation":"

The ID of the step group.

", + "location":"uri", + "locationName":"id" + } + } + }, + "GetTemplateStepGroupResponse":{ + "type":"structure", + "members":{ + "templateId":{ + "shape":"String", + "documentation":"

The ID of the template.

" + }, + "id":{ + "shape":"String", + "documentation":"

The ID of the step group.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the step group.

" + }, + "description":{ + "shape":"String", + "documentation":"

The description of the step group.

" + }, + "status":{ + "shape":"StepGroupStatus", + "documentation":"

The status of the step group.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the step group was created.

" + }, + "lastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the step group was last modified.

" + }, + "tools":{ + "shape":"ToolsList", + "documentation":"

List of AWS services utilized in a migration workflow.

" + }, + "previous":{ + "shape":"StringList", + "documentation":"

The previous step group.

" + }, + "next":{ + "shape":"StringList", + "documentation":"

The next step group.

" + } + } + }, + "GetTemplateStepRequest":{ + "type":"structure", + "required":[ + "id", + "templateId", + "stepGroupId" + ], + "members":{ + "id":{ + "shape":"StepId", + "documentation":"

The ID of the step.

", + "location":"uri", + "locationName":"id" + }, + "templateId":{ + "shape":"TemplateId", + "documentation":"

The ID of the template.

", + "location":"querystring", + "locationName":"templateId" + }, + "stepGroupId":{ + "shape":"StepGroupId", + "documentation":"

The ID of the step group.

", + "location":"querystring", + "locationName":"stepGroupId" + } + } + }, + "GetTemplateStepResponse":{ + "type":"structure", + "members":{ + "id":{ + "shape":"StepId", + "documentation":"

The ID of the step.

" + }, + "stepGroupId":{ + "shape":"StepGroupId", + "documentation":"

The ID of the step group.

" + }, + "templateId":{ + "shape":"TemplateId", + "documentation":"

The ID of the template.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the step.

" + }, + "description":{ + "shape":"String", + "documentation":"

The description of the step.

" + }, + "stepActionType":{ + "shape":"StepActionType", + "documentation":"

The action type of the step. You must run and update the status of a manual step for the workflow to continue after the completion of the step.

" + }, + "creationTime":{ + "shape":"String", + "documentation":"

The time at which the step was created.

" + }, + "previous":{ + "shape":"StringList", + "documentation":"

The previous step.

" + }, + "next":{ + "shape":"StringList", + "documentation":"

The next step.

" + }, + "outputs":{ + "shape":"StepOutputList", + "documentation":"

The outputs of the step.

" + }, + "stepAutomationConfiguration":{ + "shape":"StepAutomationConfiguration", + "documentation":"

The custom script to run tests on source or target environments.

" + } + } + }, + "GetWorkflowStepGroupRequest":{ + "type":"structure", + "required":[ + "id", + "workflowId" + ], + "members":{ + "id":{ + "shape":"StepGroupId", + "documentation":"

The ID of the step group.

", + "location":"uri", + "locationName":"id" + }, + "workflowId":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow.

", + "location":"querystring", + "locationName":"workflowId" + } + } + }, + "GetWorkflowStepGroupResponse":{ + "type":"structure", + "members":{ + "id":{ + "shape":"StepGroupId", + "documentation":"

The ID of the step group.

" + }, + "workflowId":{ + "shape":"String", + "documentation":"

The ID of the migration workflow.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the step group.

" + }, + "description":{ + "shape":"String", + "documentation":"

The description of the step group.

" + }, + "status":{ + "shape":"StepGroupStatus", + "documentation":"

The status of the step group.

" + }, + "owner":{ + "shape":"Owner", + "documentation":"

The owner of the step group.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the step group was created.

" + }, + "lastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the step group was last modified.

" + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the step group ended.

" + }, + "tools":{ + "shape":"ToolsList", + "documentation":"

List of AWS services utilized in a migration workflow.

" + }, + "previous":{ + "shape":"StringList", + "documentation":"

The previous step group.

" + }, + "next":{ + "shape":"StringList", + "documentation":"

The next step group.

" + } + } + }, + "GetWorkflowStepRequest":{ + "type":"structure", + "required":[ + "workflowId", + "stepGroupId", + "id" + ], + "members":{ + "workflowId":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow.

", + "location":"querystring", + "locationName":"workflowId" + }, + "stepGroupId":{ + "shape":"StepGroupId", + "documentation":"

The ID of the step group.

", + "location":"querystring", + "locationName":"stepGroupId" + }, + "id":{ + "shape":"StepId", + "documentation":"

The ID of the step.

", + "location":"uri", + "locationName":"id" + } + } + }, + "GetWorkflowStepResponse":{ + "type":"structure", + "members":{ + "name":{ + "shape":"String", + "documentation":"

The name of the step.

" + }, + "stepGroupId":{ + "shape":"String", + "documentation":"

The ID of the step group.

" + }, + "workflowId":{ + "shape":"String", + "documentation":"

The ID of the migration workflow.

" + }, + "stepId":{ + "shape":"String", + "documentation":"

The ID of the step.

" + }, + "description":{ + "shape":"String", + "documentation":"

The description of the step.

" + }, + "stepActionType":{ + "shape":"StepActionType", + "documentation":"

The action type of the step. You must run and update the status of a manual step for the workflow to continue after the completion of the step.

" + }, + "owner":{ + "shape":"Owner", + "documentation":"

The owner of the step.

" + }, + "workflowStepAutomationConfiguration":{ + "shape":"WorkflowStepAutomationConfiguration", + "documentation":"

The custom script to run tests on source or target environments.

" + }, + "stepTarget":{ + "shape":"StringList", + "documentation":"

The servers on which a step will be run.

" + }, + "outputs":{ + "shape":"GetWorkflowStepResponseOutputsList", + "documentation":"

The outputs of the step.

" + }, + "previous":{ + "shape":"StringList", + "documentation":"

The previous step.

" + }, + "next":{ + "shape":"StringList", + "documentation":"

The next step.

" + }, + "status":{ + "shape":"StepStatus", + "documentation":"

The status of the step.

" + }, + "statusMessage":{ + "shape":"String", + "documentation":"

The status message of the step.

" + }, + "scriptOutputLocation":{ + "shape":"String", + "documentation":"

The output location of the script.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the step was created.

" + }, + "lastStartTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the step was last started.

" + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the step ended.

" + }, + "noOfSrvCompleted":{ + "shape":"Integer", + "documentation":"

The number of servers that have been migrated.

" + }, + "noOfSrvFailed":{ + "shape":"Integer", + "documentation":"

The number of servers that have failed to migrate.

" + }, + "totalNoOfSrv":{ + "shape":"Integer", + "documentation":"

The total number of servers in the step.

" + } + } + }, + "GetWorkflowStepResponseOutputsList":{ + "type":"list", + "member":{"shape":"WorkflowStepOutput"}, + "max":5, + "min":0 + }, + "IPAddress":{ + "type":"string", + "max":15, + "min":0, + "pattern":"(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])" + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

An internal error has occurred.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "ListMigrationWorkflowTemplatesRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results that can be returned.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "name":{ + "shape":"TemplateName", + "documentation":"

The name of the template.

", + "location":"querystring", + "locationName":"name" + } + } + }, + "ListMigrationWorkflowTemplatesResponse":{ + "type":"structure", + "required":["templateSummary"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "templateSummary":{ + "shape":"TemplateSummaryList", + "documentation":"

The summary of the template.

" + } + } + }, + "ListMigrationWorkflowsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results that can be returned.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "templateId":{ + "shape":"TemplateId", + "documentation":"

The ID of the template.

", + "location":"querystring", + "locationName":"templateId" + }, + "adsApplicationConfigurationName":{ + "shape":"ApplicationConfigurationName", + "documentation":"

The name of the application configured in Application Discovery Service.

", + "location":"querystring", + "locationName":"adsApplicationConfigurationName" + }, + "status":{ + "shape":"MigrationWorkflowStatusEnum", + "documentation":"

The status of the migration workflow.

", + "location":"querystring", + "locationName":"status" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the migration workflow.

", + "location":"querystring", + "locationName":"name" + } + } + }, + "ListMigrationWorkflowsResponse":{ + "type":"structure", + "required":["migrationWorkflowSummary"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "migrationWorkflowSummary":{ + "shape":"MigrationWorkflowSummaryList", + "documentation":"

The summary of the migration workflow.

" + } + } + }, + "ListPluginsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of plugins that can be returned.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListPluginsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "plugins":{ + "shape":"PluginSummaries", + "documentation":"

Migration Hub Orchestrator plugins.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"TagMap", + "documentation":"

The tags added to a resource.

" + } + } + }, + "ListTemplateStepGroupsRequest":{ + "type":"structure", + "required":["templateId"], + "members":{ + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results that can be returned.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "templateId":{ + "shape":"TemplateId", + "documentation":"

The ID of the template.

", + "location":"uri", + "locationName":"templateId" + } + } + }, + "ListTemplateStepGroupsResponse":{ + "type":"structure", + "required":["templateStepGroupSummary"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "templateStepGroupSummary":{ + "shape":"TemplateStepGroupSummaryList", + "documentation":"

The summary of the step group in the template.

" + } + } + }, + "ListTemplateStepsRequest":{ + "type":"structure", + "required":[ + "templateId", + "stepGroupId" + ], + "members":{ + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results that can be returned.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "templateId":{ + "shape":"TemplateId", + "documentation":"

The ID of the template.

", + "location":"querystring", + "locationName":"templateId" + }, + "stepGroupId":{ + "shape":"StepGroupId", + "documentation":"

The ID of the step group.

", + "location":"querystring", + "locationName":"stepGroupId" + } + } + }, + "ListTemplateStepsResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "templateStepSummaryList":{ + "shape":"TemplateStepSummaryList", + "documentation":"

The list of summaries of steps in a template.

" + } + } + }, + "ListWorkflowStepGroupsRequest":{ + "type":"structure", + "required":["workflowId"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results that can be returned.

", + "location":"querystring", + "locationName":"maxResults" + }, + "workflowId":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow.

", + "location":"querystring", + "locationName":"workflowId" + } + } + }, + "ListWorkflowStepGroupsResponse":{ + "type":"structure", + "required":["workflowStepGroupsSummary"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "workflowStepGroupsSummary":{ + "shape":"WorkflowStepGroupsSummaryList", + "documentation":"

The summary of step groups in a migration workflow.

" + } + } + }, + "ListWorkflowStepsRequest":{ + "type":"structure", + "required":[ + "workflowId", + "stepGroupId" + ], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results that can be returned.

", + "location":"querystring", + "locationName":"maxResults" + }, + "workflowId":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow.

", + "location":"uri", + "locationName":"workflowId" + }, + "stepGroupId":{ + "shape":"StepGroupId", + "documentation":"

The ID of the step group.

", + "location":"uri", + "locationName":"stepGroupId" + } + } + }, + "ListWorkflowStepsResponse":{ + "type":"structure", + "required":["workflowStepsSummary"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token.

" + }, + "workflowStepsSummary":{ + "shape":"WorkflowStepsSummaryList", + "documentation":"

The summary of steps in a migration workflow.

" + } + } + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":0 + }, + "MigrationWorkflowDescription":{ + "type":"string", + "max":500, + "min":0, + "pattern":"[-a-zA-Z0-9_.+, ]*" + }, + "MigrationWorkflowId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-zA-Z0-9-]+" + }, + "MigrationWorkflowName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[-a-zA-Z0-9_.+]+[-a-zA-Z0-9_.+ ]*" + }, + "MigrationWorkflowStatusEnum":{ + "type":"string", + "enum":[ + "CREATING", + "NOT_STARTED", + "CREATION_FAILED", + "STARTING", + "IN_PROGRESS", + "WORKFLOW_FAILED", + "PAUSED", + "PAUSING", + "PAUSING_FAILED", + "USER_ATTENTION_REQUIRED", + "DELETING", + "DELETION_FAILED", + "DELETED", + "COMPLETED" + ] + }, + "MigrationWorkflowSummary":{ + "type":"structure", + "members":{ + "id":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the migration workflow.

" + }, + "templateId":{ + "shape":"String", + "documentation":"

The ID of the template.

" + }, + "adsApplicationConfigurationName":{ + "shape":"String", + "documentation":"

The name of the application configured in Application Discovery Service.

" + }, + "status":{ + "shape":"MigrationWorkflowStatusEnum", + "documentation":"

The status of the migration workflow.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the migration workflow was created.

" + }, + "endTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the migration workflow ended.

" + }, + "statusMessage":{ + "shape":"String", + "documentation":"

The status message of the migration workflow.

" + }, + "completedSteps":{ + "shape":"Integer", + "documentation":"

The number of steps completed in the migration workflow.

" + }, + "totalSteps":{ + "shape":"Integer", + "documentation":"

The total number of steps in the migration workflow.

" + } + }, + "documentation":"

The summary of a migration workflow.

" + }, + "MigrationWorkflowSummaryList":{ + "type":"list", + "member":{"shape":"MigrationWorkflowSummary"} + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":0, + "pattern":".*\\S.*" + }, + "Owner":{ + "type":"string", + "enum":[ + "AWS_MANAGED", + "CUSTOM" + ] + }, + "PlatformCommand":{ + "type":"structure", + "members":{ + "linux":{ + "shape":"String", + "documentation":"

Command for Linux.

" + }, + "windows":{ + "shape":"String", + "documentation":"

Command for Windows.

" + } + }, + "documentation":"

Command to be run on a particular operating system.

" + }, + "PlatformScriptKey":{ + "type":"structure", + "members":{ + "linux":{ + "shape":"S3Key", + "documentation":"

The script location for Linux.

" + }, + "windows":{ + "shape":"S3Key", + "documentation":"

The script location for Windows.

" + } + }, + "documentation":"

The script location for a particular operating system.

" + }, + "PluginHealth":{ + "type":"string", + "enum":[ + "HEALTHY", + "UNHEALTHY" + ] + }, + "PluginId":{ + "type":"string", + "max":60, + "min":1, + "pattern":".*\\S.*" + }, + "PluginSummaries":{ + "type":"list", + "member":{"shape":"PluginSummary"} + }, + "PluginSummary":{ + "type":"structure", + "members":{ + "pluginId":{ + "shape":"PluginId", + "documentation":"

The ID of the plugin.

" + }, + "hostname":{ + "shape":"String", + "documentation":"

The name of the host.

" + }, + "status":{ + "shape":"PluginHealth", + "documentation":"

The status of the plugin.

" + }, + "ipAddress":{ + "shape":"IPAddress", + "documentation":"

The IP address at which the plugin is located.

" + }, + "version":{ + "shape":"PluginVersion", + "documentation":"

The version of the plugin.

" + }, + "registeredTime":{ + "shape":"String", + "documentation":"

The time at which the plugin was registered.

" + } + }, + "documentation":"

The summary of the Migration Hub Orchestrator plugin.

" + }, + "PluginVersion":{ + "type":"string", + "max":1024, + "min":0, + "pattern":".*" + }, + "ResourceArn":{ + "type":"string", + "pattern":"arn:aws:migrationhub-orchestrator:[a-z0-9-]+:[0-9]+:workflow/[.]*" + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The resource is not available.

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "RetryWorkflowStepRequest":{ + "type":"structure", + "required":[ + "workflowId", + "stepGroupId", + "id" + ], + "members":{ + "workflowId":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow.

", + "location":"querystring", + "locationName":"workflowId" + }, + "stepGroupId":{ + "shape":"StepGroupId", + "documentation":"

The ID of the step group.

", + "location":"querystring", + "locationName":"stepGroupId" + }, + "id":{ + "shape":"StepId", + "documentation":"

The ID of the step.

", + "location":"uri", + "locationName":"id" + } + } + }, + "RetryWorkflowStepResponse":{ + "type":"structure", + "members":{ + "stepGroupId":{ + "shape":"String", + "documentation":"

The ID of the step group.

" + }, + "workflowId":{ + "shape":"String", + "documentation":"

The ID of the migration workflow.

" + }, + "id":{ + "shape":"String", + "documentation":"

The ID of the step.

" + }, + "status":{ + "shape":"StepStatus", + "documentation":"

The status of the step.

" + } + } + }, + "RunEnvironment":{ + "type":"string", + "enum":[ + "AWS", + "ONPREMISE" + ] + }, + "S3Bucket":{ + "type":"string", + "max":63, + "min":0, + "pattern":"[0-9a-z]+[0-9a-z\\.\\-]*[0-9a-z]+" + }, + "S3Key":{ + "type":"string", + "max":1024, + "min":0 + }, + "StartMigrationWorkflowRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow.

", + "location":"uri", + "locationName":"id" + } + } + }, + "StartMigrationWorkflowResponse":{ + "type":"structure", + "members":{ + "id":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow.

" + }, + "arn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the migration workflow.

" + }, + "status":{ + "shape":"MigrationWorkflowStatusEnum", + "documentation":"

The status of the migration workflow.

" + }, + "statusMessage":{ + "shape":"String", + "documentation":"

The status message of the migration workflow.

" + }, + "lastStartTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the migration workflow was last started.

" + } + } + }, + "StepActionType":{ + "type":"string", + "enum":[ + "MANUAL", + "AUTOMATED" + ] + }, + "StepAutomationConfiguration":{ + "type":"structure", + "members":{ + "scriptLocationS3Bucket":{ + "shape":"String", + "documentation":"

The Amazon S3 bucket where the script is located.

" + }, + "scriptLocationS3Key":{ + "shape":"PlatformScriptKey", + "documentation":"

The Amazon S3 key for the script location.

" + }, + "command":{ + "shape":"PlatformCommand", + "documentation":"

The command to run the script.

" + }, + "runEnvironment":{ + "shape":"RunEnvironment", + "documentation":"

The source or target environment.

" + }, + "targetType":{ + "shape":"TargetType", + "documentation":"

The servers on which to run the script.

" + } + }, + "documentation":"

The custom script to run tests on source or target environments.

" + }, + "StepDescription":{ + "type":"string", + "max":500, + "min":0, + "pattern":"[-a-zA-Z0-9_.+, ]*" + }, + "StepGroupDescription":{ + "type":"string", + "max":500, + "min":0, + "pattern":"[-a-zA-Z0-9_.+, ]*" + }, + "StepGroupId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-zA-Z0-9-]+" + }, + "StepGroupName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[-a-zA-Z0-9_.+]+[-a-zA-Z0-9_.+ ]*" + }, + "StepGroupStatus":{ + "type":"string", + "enum":[ + "AWAITING_DEPENDENCIES", + "READY", + "IN_PROGRESS", + "COMPLETED", + "FAILED", + "PAUSED", + "PAUSING", + "USER_ATTENTION_REQUIRED" + ] + }, + "StepId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-zA-Z0-9-]+" + }, + "StepInput":{ + "type":"structure", + "members":{ + "integerValue":{ + "shape":"Integer", + "documentation":"

The value of the integer.

", + "box":true + }, + "stringValue":{ + "shape":"StringValue", + "documentation":"

String value.

" + }, + "listOfStringsValue":{ + "shape":"StringList", + "documentation":"

List of string values.

" + }, + "mapOfStringValue":{ + "shape":"StringMap", + "documentation":"

Map of string values.

" + } + }, + "documentation":"

A map of key value pairs that is generated when you create a migration workflow. The key value pairs will differ based on your selection of the template.

", + "union":true + }, + "StepInputParameters":{ + "type":"map", + "key":{"shape":"StepInputParametersKey"}, + "value":{"shape":"StepInput"}, + "sensitive":true + }, + "StepInputParametersKey":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-zA-Z0-9-_ ()]+" + }, + "StepName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[-a-zA-Z0-9_.+]+[-a-zA-Z0-9_.+ ]*" + }, + "StepOutput":{ + "type":"structure", + "members":{ + "name":{ + "shape":"String", + "documentation":"

The name of the step output.

" + }, + "dataType":{ + "shape":"DataType", + "documentation":"

The data type of the step output.

" + }, + "required":{ + "shape":"Boolean", + "documentation":"

Determines whether an output is required from a step.

" + } + }, + "documentation":"

The output of the step.

" + }, + "StepOutputList":{ + "type":"list", + "member":{"shape":"StepOutput"} + }, + "StepStatus":{ + "type":"string", + "enum":[ + "AWAITING_DEPENDENCIES", + "READY", + "IN_PROGRESS", + "COMPLETED", + "FAILED", + "PAUSED", + "USER_ATTENTION_REQUIRED" + ] + }, + "StopMigrationWorkflowRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow.

", + "location":"uri", + "locationName":"id" + } + } + }, + "StopMigrationWorkflowResponse":{ + "type":"structure", + "members":{ + "id":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow.

" + }, + "arn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the migration workflow.

" + }, + "status":{ + "shape":"MigrationWorkflowStatusEnum", + "documentation":"

The status of the migration workflow.

" + }, + "statusMessage":{ + "shape":"String", + "documentation":"

The status message of the migration workflow.

" + }, + "lastStopTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the migration workflow was stopped.

" + } + } + }, + "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"StringListMember"} + }, + "StringListMember":{ + "type":"string", + "max":100, + "min":0 + }, + "StringMap":{ + "type":"map", + "key":{"shape":"StringMapKey"}, + "value":{"shape":"StringMapValue"} + }, + "StringMapKey":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[a-zA-Z0-9-_ ()]+" + }, + "StringMapValue":{ + "type":"string", + "max":100, + "min":0 + }, + "StringValue":{ + "type":"string", + "max":100, + "min":0 + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"(?!aws:)[a-zA-Z+-=._:/]+" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":0 + }, + "TagMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"} + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource to which you want to add tags.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

A collection of labels, in the form of key:value pairs, that apply to this resource.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, + "TargetType":{ + "type":"string", + "enum":[ + "SINGLE", + "ALL", + "NONE" + ] + }, + "TemplateId":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[-a-zA-Z0-9_.+]+[-a-zA-Z0-9_.+ ]*" + }, + "TemplateInput":{ + "type":"structure", + "members":{ + "inputName":{ + "shape":"TemplateInputName", + "documentation":"

The name of the template input.

" + }, + "dataType":{ + "shape":"DataType", + "documentation":"

The data type of the template input.

" + }, + "required":{ + "shape":"Boolean", + "documentation":"

Determines whether an input is required from the template.

" + } + }, + "documentation":"

The input parameters of a template.

" + }, + "TemplateInputList":{ + "type":"list", + "member":{"shape":"TemplateInput"} + }, + "TemplateInputName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[-a-zA-Z0-9_.+]+[-a-zA-Z0-9_.+ ]*" + }, + "TemplateName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[-a-zA-Z0-9_.+]+[-a-zA-Z0-9_.+ ]*" + }, + "TemplateStatus":{ + "type":"string", + "enum":["CREATED"] + }, + "TemplateStepGroupSummary":{ + "type":"structure", + "members":{ + "id":{ + "shape":"String", + "documentation":"

The ID of the step group.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the step group.

" + }, + "previous":{ + "shape":"StringList", + "documentation":"

The previous step group.

" + }, + "next":{ + "shape":"StringList", + "documentation":"

The next step group.

" + } + }, + "documentation":"

The summary of the step group in the template.

" + }, + "TemplateStepGroupSummaryList":{ + "type":"list", + "member":{"shape":"TemplateStepGroupSummary"} + }, + "TemplateStepSummary":{ + "type":"structure", + "members":{ + "id":{ + "shape":"String", + "documentation":"

The ID of the step.

" + }, + "stepGroupId":{ + "shape":"String", + "documentation":"

The ID of the step group.

" + }, + "templateId":{ + "shape":"String", + "documentation":"

The ID of the template.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the step.

" + }, + "stepActionType":{ + "shape":"StepActionType", + "documentation":"

The action type of the step. You must run and update the status of a manual step for the workflow to continue after the completion of the step.

" + }, + "targetType":{ + "shape":"TargetType", + "documentation":"

The servers on which to run the script.

" + }, + "owner":{ + "shape":"Owner", + "documentation":"

The owner of the step.

" + }, + "previous":{ + "shape":"StringList", + "documentation":"

The previous step.

" + }, + "next":{ + "shape":"StringList", + "documentation":"

The next step.

" + } + }, + "documentation":"

The summary of the step.

" + }, + "TemplateStepSummaryList":{ + "type":"list", + "member":{"shape":"TemplateStepSummary"} + }, + "TemplateSummary":{ + "type":"structure", + "members":{ + "id":{ + "shape":"String", + "documentation":"

The ID of the template.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the template.

" + }, + "arn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the template.

" + }, + "description":{ + "shape":"String", + "documentation":"

The description of the template.

" + } + }, + "documentation":"

The summary of the template.

" + }, + "TemplateSummaryList":{ + "type":"list", + "member":{"shape":"TemplateSummary"} + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The request was denied due to request throttling.

", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "Tool":{ + "type":"structure", + "members":{ + "name":{ + "shape":"String", + "documentation":"

The name of an AWS service.

" + }, + "url":{ + "shape":"String", + "documentation":"

The URL of an AWS service.

" + } + }, + "documentation":"

List of AWS services utilized in a migration workflow.

" + }, + "ToolsList":{ + "type":"list", + "member":{"shape":"Tool"} + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource from which you want to remove tags.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

One or more tag keys. Specify only the tag keys, not the tag values.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateMigrationWorkflowRequest":{ + "type":"structure", + "required":["id"], + "members":{ + "id":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow.

", + "location":"uri", + "locationName":"id" + }, + "name":{ + "shape":"UpdateMigrationWorkflowRequestNameString", + "documentation":"

The name of the migration workflow.

" + }, + "description":{ + "shape":"UpdateMigrationWorkflowRequestDescriptionString", + "documentation":"

The description of the migration workflow.

" + }, + "inputParameters":{ + "shape":"StepInputParameters", + "documentation":"

The input parameters required to update a migration workflow.

" + }, + "stepTargets":{ + "shape":"StringList", + "documentation":"

The servers on which a step will be run.

" + } + } + }, + "UpdateMigrationWorkflowRequestDescriptionString":{ + "type":"string", + "max":500, + "min":0, + "pattern":"[-a-zA-Z0-9_.+, ]*" + }, + "UpdateMigrationWorkflowRequestNameString":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[-a-zA-Z0-9_.+]+[-a-zA-Z0-9_.+ ]*" + }, + "UpdateMigrationWorkflowResponse":{ + "type":"structure", + "members":{ + "id":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow.

" + }, + "arn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the migration workflow.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the migration workflow.

" + }, + "description":{ + "shape":"String", + "documentation":"

The description of the migration workflow.

" + }, + "templateId":{ + "shape":"String", + "documentation":"

The ID of the template.

" + }, + "adsApplicationConfigurationId":{ + "shape":"String", + "documentation":"

The ID of the application configured in Application Discovery Service.

" + }, + "workflowInputs":{ + "shape":"StepInputParameters", + "documentation":"

The inputs required to update a migration workflow.

" + }, + "stepTargets":{ + "shape":"StringList", + "documentation":"

The servers on which a step will be run.

" + }, + "status":{ + "shape":"MigrationWorkflowStatusEnum", + "documentation":"

The status of the migration workflow.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the migration workflow was created.

" + }, + "lastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the migration workflow was last modified.

" + }, + "tags":{ + "shape":"StringMap", + "documentation":"

The tags added to the migration workflow.

" + } + } + }, + "UpdateWorkflowStepGroupRequest":{ + "type":"structure", + "required":[ + "workflowId", + "id" + ], + "members":{ + "workflowId":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow.

", + "location":"querystring", + "locationName":"workflowId" + }, + "id":{ + "shape":"StepGroupId", + "documentation":"

The ID of the step group.

", + "location":"uri", + "locationName":"id" + }, + "name":{ + "shape":"StepGroupName", + "documentation":"

The name of the step group.

" + }, + "description":{ + "shape":"StepGroupDescription", + "documentation":"

The description of the step group.

" + }, + "next":{ + "shape":"StringList", + "documentation":"

The next step group.

" + }, + "previous":{ + "shape":"StringList", + "documentation":"

The previous step group.

" + } + } + }, + "UpdateWorkflowStepGroupResponse":{ + "type":"structure", + "members":{ + "workflowId":{ + "shape":"String", + "documentation":"

The ID of the migration workflow.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the step group.

" + }, + "id":{ + "shape":"String", + "documentation":"

The ID of the step group.

" + }, + "description":{ + "shape":"String", + "documentation":"

The description of the step group.

" + }, + "tools":{ + "shape":"ToolsList", + "documentation":"

List of AWS services utilized in a migration workflow.

" + }, + "next":{ + "shape":"StringList", + "documentation":"

The next step group.

" + }, + "previous":{ + "shape":"StringList", + "documentation":"

The previous step group.

" + }, + "lastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The time at which the step group was last modified.

" + } + } + }, + "UpdateWorkflowStepRequest":{ + "type":"structure", + "required":[ + "id", + "stepGroupId", + "workflowId" + ], + "members":{ + "id":{ + "shape":"StepId", + "documentation":"

The ID of the step.

", + "location":"uri", + "locationName":"id" + }, + "stepGroupId":{ + "shape":"StepGroupId", + "documentation":"

The ID of the step group.

" + }, + "workflowId":{ + "shape":"MigrationWorkflowId", + "documentation":"

The ID of the migration workflow.

" + }, + "name":{ + "shape":"StepName", + "documentation":"

The name of the step.

" + }, + "description":{ + "shape":"StepDescription", + "documentation":"

The description of the step.

" + }, + "stepActionType":{ + "shape":"StepActionType", + "documentation":"

The action type of the step. You must run and update the status of a manual step for the workflow to continue after the completion of the step.

" + }, + "workflowStepAutomationConfiguration":{ + "shape":"WorkflowStepAutomationConfiguration", + "documentation":"

The custom script to run tests on the source and target environments.

" + }, + "stepTarget":{ + "shape":"StringList", + "documentation":"

The servers on which a step will be run.

" + }, + "outputs":{ + "shape":"WorkflowStepOutputList", + "documentation":"

The outputs of a step.

" + }, + "previous":{ + "shape":"StringList", + "documentation":"

The previous step.

" + }, + "next":{ + "shape":"StringList", + "documentation":"

The next step.

" + }, + "status":{ + "shape":"StepStatus", + "documentation":"

The status of the step.

" + } + } + }, + "UpdateWorkflowStepResponse":{ + "type":"structure", + "members":{ + "id":{ + "shape":"StepId", + "documentation":"

The ID of the step.

" + }, + "stepGroupId":{ + "shape":"String", + "documentation":"

The ID of the step group.

" + }, + "workflowId":{ + "shape":"String", + "documentation":"

The ID of the migration workflow.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the step.

" + } + } + }, + "ValidationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The input fails to satisfy the constraints specified by an AWS service.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":false} + }, + "WorkflowStepAutomationConfiguration":{ + "type":"structure", + "members":{ + "scriptLocationS3Bucket":{ + "shape":"S3Bucket", + "documentation":"

The Amazon S3 bucket where the script is located.

" + }, + "scriptLocationS3Key":{ + "shape":"PlatformScriptKey", + "documentation":"

The Amazon S3 key for the script location.

" + }, + "command":{ + "shape":"PlatformCommand", + "documentation":"

The command required to run the script.

" + }, + "runEnvironment":{ + "shape":"RunEnvironment", + "documentation":"

The source or target environment.

" + }, + "targetType":{ + "shape":"TargetType", + "documentation":"

The servers on which to run the script.

" + } + }, + "documentation":"

The custom script to run tests on source or target environments.

" + }, + "WorkflowStepGroupSummary":{ + "type":"structure", + "members":{ + "id":{ + "shape":"String", + "documentation":"

The ID of the step group.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the step group.

" + }, + "owner":{ + "shape":"Owner", + "documentation":"

The owner of the step group.

" + }, + "status":{ + "shape":"StepGroupStatus", + "documentation":"

The status of the step group.

" + }, + "previous":{ + "shape":"StringList", + "documentation":"

The previous step group.

" + }, + "next":{ + "shape":"StringList", + "documentation":"

The next step group.

" + } + }, + "documentation":"

The summary of a step group in a workflow.

" + }, + "WorkflowStepGroupsSummaryList":{ + "type":"list", + "member":{"shape":"WorkflowStepGroupSummary"} + }, + "WorkflowStepOutput":{ + "type":"structure", + "members":{ + "name":{ + "shape":"WorkflowStepOutputName", + "documentation":"

The name of the step output.

" + }, + "dataType":{ + "shape":"DataType", + "documentation":"

The data type of the output.

" + }, + "required":{ + "shape":"Boolean", + "documentation":"

Determines whether an output is required from a step.

" + }, + "value":{ + "shape":"WorkflowStepOutputUnion", + "documentation":"

The value of the output.

" + } + }, + "documentation":"

The output of a step.

" + }, + "WorkflowStepOutputList":{ + "type":"list", + "member":{"shape":"WorkflowStepOutput"} + }, + "WorkflowStepOutputName":{ + "type":"string", + "max":100, + "min":1, + "pattern":"[-a-zA-Z0-9_.+]+[-a-zA-Z0-9_.+ ]*" + }, + "WorkflowStepOutputUnion":{ + "type":"structure", + "members":{ + "integerValue":{ + "shape":"Integer", + "documentation":"

The integer value.

", + "box":true + }, + "stringValue":{ + "shape":"StringValue", + "documentation":"

The string value.

" + }, + "listOfStringValue":{ + "shape":"StringList", + "documentation":"

The list of string values.

" + } + }, + "documentation":"

A structure to hold multiple values of an output.

", + "union":true + }, + "WorkflowStepSummary":{ + "type":"structure", + "members":{ + "stepId":{ + "shape":"String", + "documentation":"

The ID of the step.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the step.

" + }, + "stepActionType":{ + "shape":"StepActionType", + "documentation":"

The action type of the step. You must run and update the status of a manual step for the workflow to continue after the completion of the step.

" + }, + "owner":{ + "shape":"Owner", + "documentation":"

The owner of the step.

" + }, + "previous":{ + "shape":"StringList", + "documentation":"

The previous step.

" + }, + "next":{ + "shape":"StringList", + "documentation":"

The next step.

" + }, + "status":{ + "shape":"StepStatus", + "documentation":"

The status of the step.

" + }, + "statusMessage":{ + "shape":"String", + "documentation":"

The status message of the step.

" + }, + "noOfSrvCompleted":{ + "shape":"Integer", + "documentation":"

The number of servers that have been migrated.

" + }, + "noOfSrvFailed":{ + "shape":"Integer", + "documentation":"

The number of servers that have failed to migrate.

" + }, + "totalNoOfSrv":{ + "shape":"Integer", + "documentation":"

The total number of servers in the step.

" + }, + "description":{ + "shape":"String", + "documentation":"

The description of the step.

" + }, + "scriptLocation":{ + "shape":"String", + "documentation":"

The location of the script.

" + } + }, + "documentation":"

The summary of the step in a migration workflow.

" + }, + "WorkflowStepsSummaryList":{ + "type":"list", + "member":{"shape":"WorkflowStepSummary"} + } + }, + "documentation":"

This API reference provides descriptions, syntax, and other details about each of the actions and data types for AWS Migration Hub Orchestrator. The topic for each action shows the API request parameters and the response. Alternatively, you can use one of the AWS SDKs to access an API that is tailored to the programming language or platform that you're using.

" +} diff --git a/botocore/data/migrationhuborchestrator/2021-08-28/waiters-2.json b/botocore/data/migrationhuborchestrator/2021-08-28/waiters-2.json new file mode 100644 index 0000000000..13f60ee66b --- /dev/null +++ b/botocore/data/migrationhuborchestrator/2021-08-28/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/botocore/data/polly/2016-06-10/service-2.json b/botocore/data/polly/2016-06-10/service-2.json index 29419164af..ba3e44e0c2 100644 --- a/botocore/data/polly/2016-06-10/service-2.json +++ b/botocore/data/polly/2016-06-10/service-2.json @@ -418,7 +418,8 @@ "en-NZ", "en-ZA", "ca-ES", - "de-AT" + "de-AT", + "yue-CN" ] }, "LanguageCodeList":{ @@ -1082,7 +1083,8 @@ "Daniel", "Liam", "Pedro", - "Kajal" + "Kajal", + "Hiujin" ] }, "VoiceList":{ diff --git a/botocore/data/proton/2020-07-20/service-2.json b/botocore/data/proton/2020-07-20/service-2.json index c0ab5492cc..51cf3043a6 100644 --- a/botocore/data/proton/2020-07-20/service-2.json +++ b/botocore/data/proton/2020-07-20/service-2.json @@ -29,7 +29,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

In a management account, an environment account connection request is accepted. When the environment account connection request is accepted, Proton can use the associated IAM role to provision environment infrastructure resources in the associated environment account.

For more information, see Environment account connections in the Proton Administrator guide.

", + "documentation":"

In a management account, an environment account connection request is accepted. When the environment account connection request is accepted, Proton can use the associated IAM role to provision environment infrastructure resources in the associated environment account.

For more information, see Environment account connections in the Proton User guide.

", "idempotent":true }, "CancelComponentDeployment":{ @@ -48,7 +48,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Attempts to cancel a component deployment (for a component that is in the IN_PROGRESS deployment status).

For more information about components, see Proton components in the Proton Administrator Guide.

" + "documentation":"

Attempts to cancel a component deployment (for a component that is in the IN_PROGRESS deployment status).

For more information about components, see Proton components in the Proton User Guide.

" }, "CancelEnvironmentDeployment":{ "name":"CancelEnvironmentDeployment", @@ -66,7 +66,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Attempts to cancel an environment deployment on an UpdateEnvironment action, if the deployment is IN_PROGRESS. For more information, see Update an environment in the Proton Administrator guide.

The following list includes potential cancellation scenarios.

" + "documentation":"

Attempts to cancel an environment deployment on an UpdateEnvironment action, if the deployment is IN_PROGRESS. For more information, see Update an environment in the Proton User guide.

The following list includes potential cancellation scenarios.

" }, "CancelServiceInstanceDeployment":{ "name":"CancelServiceInstanceDeployment", @@ -84,7 +84,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Attempts to cancel a service instance deployment on an UpdateServiceInstance action, if the deployment is IN_PROGRESS. For more information, see Update a service instance in the Proton Administrator guide or the Proton User guide.

The following list includes potential cancellation scenarios.

" + "documentation":"

Attempts to cancel a service instance deployment on an UpdateServiceInstance action, if the deployment is IN_PROGRESS. For more information, see Update a service instance in the Proton User guide.

The following list includes potential cancellation scenarios.

" }, "CancelServicePipelineDeployment":{ "name":"CancelServicePipelineDeployment", @@ -102,7 +102,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Attempts to cancel a service pipeline deployment on an UpdateServicePipeline action, if the deployment is IN_PROGRESS. For more information, see Update a service pipeline in the Proton Administrator guide or the Proton User guide.

The following list includes potential cancellation scenarios.

" + "documentation":"

Attempts to cancel a service pipeline deployment on an UpdateServicePipeline action, if the deployment is IN_PROGRESS. For more information, see Update a service pipeline in the Proton User guide.

The following list includes potential cancellation scenarios.

" }, "CreateComponent":{ "name":"CreateComponent", @@ -121,7 +121,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Create an Proton component. A component is an infrastructure extension for a service instance.

For more information about components, see Proton components in the Proton Administrator Guide.

", + "documentation":"

Create an Proton component. A component is an infrastructure extension for a service instance.

For more information about components, see Proton components in the Proton User Guide.

", "idempotent":true }, "CreateEnvironment":{ @@ -141,7 +141,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deploy a new environment. An Proton environment is created from an environment template that defines infrastructure and resources that can be shared across services.

You can provision environments using the following methods:

For more information, see Environments and Provisioning methods in the Proton Administrator Guide.

", + "documentation":"

Deploy a new environment. An Proton environment is created from an environment template that defines infrastructure and resources that can be shared across services.

You can provision environments using the following methods:

For more information, see Environments and Provisioning methods in the Proton User Guide.

", "idempotent":true }, "CreateEnvironmentAccountConnection":{ @@ -160,7 +160,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Create an environment account connection in an environment account so that environment infrastructure resources can be provisioned in the environment account from a management account.

An environment account connection is a secure bi-directional connection between a management account and an environment account that maintains authorization and permissions. For more information, see Environment account connections in the Proton Administrator guide.

", + "documentation":"

Create an environment account connection in an environment account so that environment infrastructure resources can be provisioned in the environment account from a management account.

An environment account connection is a secure bi-directional connection between a management account and an environment account that maintains authorization and permissions. For more information, see Environment account connections in the Proton User guide.

", "idempotent":true }, "CreateEnvironmentTemplate":{ @@ -179,7 +179,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Create an environment template for Proton. For more information, see Environment Templates in the Proton Administrator Guide.

You can create an environment template in one of the two following ways:

", + "documentation":"

Create an environment template for Proton. For more information, see Environment Templates in the Proton User Guide.

You can create an environment template in one of the two following ways:

", "idempotent":true }, "CreateEnvironmentTemplateVersion":{ @@ -218,7 +218,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Create and register a link to a repository that can be used with self-managed provisioning (infrastructure or pipelines) or for template sync configurations. When you create a repository link, Proton creates a service-linked role for you.

For more information, see Self-managed provisioning, Template bundles, and Template sync configurations in the Proton Administrator Guide.

", + "documentation":"

Create and register a link to a repository. Proton uses the link to repeatedly access the repository, to either push to it (self-managed provisioning) or pull from it (template sync). You can share a linked repository across multiple resources (like environments using self-managed provisioning, or synced templates). When you create a repository link, Proton creates a service-linked role for you.

For more information, see Self-managed provisioning, Template bundles, and Template sync configurations in the Proton User Guide.

", "idempotent":true }, "CreateService":{ @@ -238,7 +238,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Create an Proton service. An Proton service is an instantiation of a service template and often includes several service instances and pipeline. For more information, see Services in the Proton Administrator Guide and Services in the Proton User Guide.

", + "documentation":"

Create an Proton service. An Proton service is an instantiation of a service template and often includes several service instances and a pipeline. For more information, see Services in the Proton User Guide.

", "idempotent":true }, "CreateServiceTemplate":{ @@ -257,7 +257,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Create a service template. The administrator creates a service template to define standardized infrastructure and an optional CI/CD service pipeline. Developers, in turn, select the service template from Proton. If the selected service template includes a service pipeline definition, they provide a link to their source code repository. Proton then deploys and manages the infrastructure defined by the selected service template. For more information, see Service Templates in the Proton Administrator Guide.

", + "documentation":"

Create a service template. The administrator creates a service template to define standardized infrastructure and an optional CI/CD service pipeline. Developers, in turn, select the service template from Proton. If the selected service template includes a service pipeline definition, they provide a link to their source code repository. Proton then deploys and manages the infrastructure defined by the selected service template. For more information, see Proton templates in the Proton User Guide.

", "idempotent":true }, "CreateServiceTemplateVersion":{ @@ -296,7 +296,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Set up a template to create new template versions automatically. When a commit is pushed to your registered repository, Proton checks for changes to your repository template bundles. If it detects a template bundle change, a new major or minor version of its template is created, if the version doesn’t already exist. For more information, see Template sync configurations in the Proton Administrator Guide.

", + "documentation":"

Set up a template to create new template versions automatically by tracking a linked repository. A linked repository is a repository that has been registered with Proton. For more information, see CreateRepository.

When a commit is pushed to your linked repository, Proton checks for changes to your repository template bundles. If it detects a template bundle change, a new major or minor version of its template is created, if the version doesn’t already exist. For more information, see Template sync configurations in the Proton User Guide.

", "idempotent":true }, "DeleteComponent":{ @@ -315,7 +315,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Delete an Proton component resource.

For more information about components, see Proton components in the Proton Administrator Guide.

", + "documentation":"

Delete an Proton component resource.

For more information about components, see Proton components in the Proton User Guide.

", "idempotent":true }, "DeleteEnvironment":{ @@ -353,7 +353,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

In an environment account, delete an environment account connection.

After you delete an environment account connection that’s in use by an Proton environment, Proton can’t manage the environment infrastructure resources until a new environment account connection is accepted for the environment account and associated environment. You're responsible for cleaning up provisioned resources that remain without an environment connection.

For more information, see Environment account connections in the Proton Administrator guide.

", + "documentation":"

In an environment account, delete an environment account connection.

After you delete an environment account connection that’s in use by an Proton environment, Proton can’t manage the environment infrastructure resources until a new environment account connection is accepted for the environment account and associated environment. You're responsible for cleaning up provisioned resources that remain without an environment connection.

For more information, see Environment account connections in the Proton User guide.

", "idempotent":true }, "DeleteEnvironmentTemplate":{ @@ -429,7 +429,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Delete a service, with its instances and pipeline.

You can't delete a service if it has any service instances that have components attached to them.

For more information about components, see Proton components in the Proton Administrator Guide.

", + "documentation":"

Delete a service, with its instances and pipeline.

You can't delete a service if it has any service instances that have components attached to them.

For more information about components, see Proton components in the Proton User Guide.

", "idempotent":true }, "DeleteServiceTemplate":{ @@ -504,7 +504,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Get detail data for the Proton pipeline service role.

" + "documentation":"

Get detail data for Proton account-wide settings.

" }, "GetComponent":{ "name":"GetComponent", @@ -521,7 +521,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Get detailed data for a component.

For more information about components, see Proton components in the Proton Administrator Guide.

" + "documentation":"

Get detailed data for a component.

For more information about components, see Proton components in the Proton User Guide.

" }, "GetEnvironment":{ "name":"GetEnvironment", @@ -555,7 +555,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

In an environment account, get the detailed data for an environment account connection.

For more information, see Environment account connections in the Proton Administrator guide.

" + "documentation":"

In an environment account, get the detailed data for an environment account connection.

For more information, see Environment account connections in the Proton User guide.

" }, "GetEnvironmentTemplate":{ "name":"GetEnvironmentTemplate", @@ -606,7 +606,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Get detail data for a repository.

" + "documentation":"

Get detail data for a linked repository.

" }, "GetRepositorySyncStatus":{ "name":"GetRepositorySyncStatus", @@ -623,7 +623,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Get the sync status of a repository used for Proton template sync. For more information about template sync, see .

A repository sync status isn't tied to the Proton Repository resource (or any other Proton resource). Therefore, tags on an Proton Repository resource have no effect on this action. Specifically, you can't use these tags to control access to this action using Attribute-based access control (ABAC).

For more information about ABAC, see ABAC in the Proton Administrator Guide.

" + "documentation":"

Get the sync status of a repository used for Proton template sync. For more information about template sync, see .

A repository sync status isn't tied to the Proton Repository resource (or any other Proton resource). Therefore, tags on an Proton Repository resource have no effect on this action. Specifically, you can't use these tags to control access to this action using Attribute-based access control (ABAC).

For more information about ABAC, see ABAC in the Proton User Guide.

" }, "GetService":{ "name":"GetService", @@ -742,7 +742,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Get a list of component Infrastructure as Code (IaC) outputs.

For more information about components, see Proton components in the Proton Administrator Guide.

" + "documentation":"

Get a list of component Infrastructure as Code (IaC) outputs.

For more information about components, see Proton components in the Proton User Guide.

" }, "ListComponentProvisionedResources":{ "name":"ListComponentProvisionedResources", @@ -759,7 +759,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

List provisioned resources for a component with details.

For more information about components, see Proton components in the Proton Administrator Guide.

" + "documentation":"

List provisioned resources for a component with details.

For more information about components, see Proton components in the Proton User Guide.

" }, "ListComponents":{ "name":"ListComponents", @@ -775,7 +775,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

List components with summary data. You can filter the result list by environment, service, or a single service instance.

For more information about components, see Proton components in the Proton Administrator Guide.

" + "documentation":"

List components with summary data. You can filter the result list by environment, service, or a single service instance.

For more information about components, see Proton components in the Proton User Guide.

" }, "ListEnvironmentAccountConnections":{ "name":"ListEnvironmentAccountConnections", @@ -791,7 +791,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

View a list of environment account connections.

For more information, see Environment account connections in the Proton Administrator guide.

" + "documentation":"

View a list of environment account connections.

For more information, see Environment account connections in the Proton User guide.

" }, "ListEnvironmentOutputs":{ "name":"ListEnvironmentOutputs", @@ -892,7 +892,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

List repositories with detail data.

" + "documentation":"

List linked repositories with detail data.

" }, "ListRepositorySyncDefinitions":{ "name":"ListRepositorySyncDefinitions", @@ -1059,7 +1059,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

List tags for a resource. For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide.

" + "documentation":"

List tags for a resource. For more information, see Proton resources and tagging in the Proton User Guide.

" }, "NotifyResourceDeploymentStatusChange":{ "name":"NotifyResourceDeploymentStatusChange", @@ -1078,7 +1078,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Notify Proton of status changes to a provisioned resource when you use self-managed provisioning.

For more information, see Self-managed provisioning in the Proton Administrator Guide.

" + "documentation":"

Notify Proton of status changes to a provisioned resource when you use self-managed provisioning.

For more information, see Self-managed provisioning in the Proton User Guide.

" }, "RejectEnvironmentAccountConnection":{ "name":"RejectEnvironmentAccountConnection", @@ -1096,7 +1096,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

In a management account, reject an environment account connection from another environment account.

After you reject an environment account connection request, you can't accept or use the rejected environment account connection.

You can’t reject an environment account connection that's connected to an environment.

For more information, see Environment account connections in the Proton Administrator guide.

", + "documentation":"

In a management account, reject an environment account connection from another environment account.

After you reject an environment account connection request, you can't accept or use the rejected environment account connection.

You can’t reject an environment account connection that's connected to an environment.

For more information, see Environment account connections in the Proton User guide.

", "idempotent":true }, "TagResource":{ @@ -1115,7 +1115,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Tag a resource. A tag is a key-value pair of metadata that you associate with an Proton resource.

For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide.

", + "documentation":"

Tag a resource. A tag is a key-value pair of metadata that you associate with an Proton resource.

For more information, see Proton resources and tagging in the Proton User Guide.

", "idempotent":true }, "UntagResource":{ @@ -1134,7 +1134,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Remove a customer tag from a resource. A tag is a key-value pair of metadata associated with an Proton resource.

For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide.

", + "documentation":"

Remove a customer tag from a resource. A tag is a key-value pair of metadata associated with an Proton resource.

For more information, see Proton resources and tagging in the Proton User Guide.

", "idempotent":true }, "UpdateAccountSettings":{ @@ -1152,7 +1152,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Update the Proton service pipeline role or repository settings.

" + "documentation":"

Update Proton settings that are used for multiple services in the Amazon Web Services account.

" }, "UpdateComponent":{ "name":"UpdateComponent", @@ -1171,7 +1171,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Update a component.

There are a few modes for updating a component. The deploymentType field defines the mode.

You can't update a component while its deployment status, or the deployment status of a service instance attached to it, is IN_PROGRESS.

For more information about components, see Proton components in the Proton Administrator Guide.

" + "documentation":"

Update a component.

There are a few modes for updating a component. The deploymentType field defines the mode.

You can't update a component while its deployment status, or the deployment status of a service instance attached to it, is IN_PROGRESS.

For more information about components, see Proton components in the Proton User Guide.

" }, "UpdateEnvironment":{ "name":"UpdateEnvironment", @@ -1189,7 +1189,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Update an environment.

If the environment is associated with an environment account connection, don't update or include the protonServiceRoleArn and provisioningRepository parameter to update or connect to an environment account connection.

You can only update to a new environment account connection if that connection was created in the same environment account that the current environment account connection was created in. The account connection must also be associated with the current environment.

If the environment isn't associated with an environment account connection, don't update or include the environmentAccountConnectionId parameter. You can't update or connect the environment to an environment account connection if it isn't already associated with an environment connection.

You can update either the environmentAccountConnectionId or protonServiceRoleArn parameter and value. You can’t update both.

If the environment was configured for Amazon Web Services-managed provisioning, omit the provisioningRepository parameter.

If the environment was configured for self-managed provisioning, specify the provisioningRepository parameter and omit the protonServiceRoleArn and environmentAccountConnectionId parameters.

For more information, see Environments and Provisioning methods in the Proton Administrator Guide.

There are four modes for updating an environment. The deploymentType field defines the mode.

NONE

In this mode, a deployment doesn't occur. Only the requested metadata parameters are updated.

CURRENT_VERSION

In this mode, the environment is deployed and updated with the new spec that you provide. Only requested parameters are updated. Don’t include minor or major version parameters when you use this deployment-type.

MINOR_VERSION

In this mode, the environment is deployed and updated with the published, recommended (latest) minor version of the current major version in use, by default. You can also specify a different minor version of the current major version in use.

MAJOR_VERSION

In this mode, the environment is deployed and updated with the published, recommended (latest) major and minor version of the current template, by default. You can also specify a different major version that's higher than the major version in use and a minor version.

" + "documentation":"

Update an environment.

If the environment is associated with an environment account connection, don't update or include the protonServiceRoleArn and provisioningRepository parameter to update or connect to an environment account connection.

You can only update to a new environment account connection if that connection was created in the same environment account that the current environment account connection was created in. The account connection must also be associated with the current environment.

If the environment isn't associated with an environment account connection, don't update or include the environmentAccountConnectionId parameter. You can't update or connect the environment to an environment account connection if it isn't already associated with an environment connection.

You can update either the environmentAccountConnectionId or protonServiceRoleArn parameter and value. You can’t update both.

If the environment was configured for Amazon Web Services-managed provisioning, omit the provisioningRepository parameter.

If the environment was configured for self-managed provisioning, specify the provisioningRepository parameter and omit the protonServiceRoleArn and environmentAccountConnectionId parameters.

For more information, see Environments and Provisioning methods in the Proton User Guide.

There are four modes for updating an environment. The deploymentType field defines the mode.

NONE

In this mode, a deployment doesn't occur. Only the requested metadata parameters are updated.

CURRENT_VERSION

In this mode, the environment is deployed and updated with the new spec that you provide. Only requested parameters are updated. Don’t include minor or major version parameters when you use this deployment-type.

MINOR_VERSION

In this mode, the environment is deployed and updated with the published, recommended (latest) minor version of the current major version in use, by default. You can also specify a different minor version of the current major version in use.

MAJOR_VERSION

In this mode, the environment is deployed and updated with the published, recommended (latest) major and minor version of the current template, by default. You can also specify a different major version that's higher than the major version in use and a minor version.
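To illustrate the deployment types above, here is a minimal boto3 sketch, not an official example; the environment name and region are placeholders. It requests the recommended minor version of the major version the environment already uses:

    import boto3

    proton = boto3.client("proton", region_name="us-east-1")

    # MINOR_VERSION mode: pick up the published, recommended minor version
    # of the environment's current major version.
    proton.update_environment(
        name="my-environment",
        deploymentType="MINOR_VERSION",
    )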

" }, "UpdateEnvironmentAccountConnection":{ "name":"UpdateEnvironmentAccountConnection", @@ -1207,7 +1207,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

In an environment account, update an environment account connection to use a new IAM role.

For more information, see Environment account connections in the Proton Administrator guide.

", + "documentation":"

In an environment account, update an environment account connection to use a new IAM role.

For more information, see Environment account connections in the Proton User guide.

", "idempotent":true }, "UpdateEnvironmentTemplate":{ @@ -1263,7 +1263,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Edit a service description or use a spec to add and delete service instances.

Existing service instances and the service pipeline can't be edited using this API. They can only be deleted.

Use the description parameter to modify the description.

Edit the spec parameter to add or delete instances.

You can't delete a service instance (remove it from the spec) if it has an attached component.

For more information about components, see Proton components in the Proton Administrator Guide.

" + "documentation":"

Edit a service description or use a spec to add and delete service instances.

Existing service instances and the service pipeline can't be edited using this API. They can only be deleted.

Use the description parameter to modify the description.

Edit the spec parameter to add or delete instances.

You can't delete a service instance (remove it from the spec) if it has an attached component.

For more information about components, see Proton components in the Proton User Guide.

" }, "UpdateServiceInstance":{ "name":"UpdateServiceInstance", @@ -1281,7 +1281,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Update a service instance.

There are a few modes for updating a service instance. The deploymentType field defines the mode.

You can't update a service instance while its deployment status, or the deployment status of a component attached to it, is IN_PROGRESS.

For more information about components, see Proton components in the Proton Administrator Guide.

" + "documentation":"

Update a service instance.

There are a few modes for updating a service instance. The deploymentType field defines the mode.

You can't update a service instance while its deployment status, or the deployment status of a component attached to it, is IN_PROGRESS.

For more information about components, see Proton components in the Proton User Guide.

" }, "UpdateServicePipeline":{ "name":"UpdateServicePipeline", @@ -1353,7 +1353,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServerException"} ], - "documentation":"

Update template sync configuration parameters, except for the templateName and templateType.

" + "documentation":"

Update template sync configuration parameters, except for the templateName and templateType. Repository details (branch, name, and provider) must be those of a linked repository. A linked repository is a repository that has been registered with Proton. For more information, see CreateRepository.

" } }, "shapes":{ @@ -1391,14 +1391,14 @@ "members":{ "pipelineProvisioningRepository":{ "shape":"RepositoryBranch", - "documentation":"

The repository configured in the Amazon Web Services account for pipeline provisioning. Required it if you have environments configured for self-managed provisioning with services that include pipelines.

" + "documentation":"

The linked repository for pipeline provisioning. Required if you have environments configured for self-managed provisioning with services that include pipelines. A linked repository is a repository that has been registered with Proton. For more information, see CreateRepository.

" }, "pipelineServiceRoleArn":{ "shape":"PipelineRoleArn", "documentation":"

The Amazon Resource Name (ARN) of the service role you want to use for provisioning pipelines. Assumed by Proton for Amazon Web Services-managed provisioning, and by customer-owned automation for self-managed provisioning.

" } }, - "documentation":"

The Proton pipeline service role and repository data shared across the Amazon Web Services account.

" + "documentation":"

Proton settings that are used for multiple services in the Amazon Web Services account.

" }, "Arn":{ "type":"string", @@ -1409,6 +1409,10 @@ "type":"string", "pattern":"^\\d{12}$" }, + "Boolean":{ + "type":"boolean", + "box":true + }, "CancelComponentDeploymentInput":{ "type":"structure", "required":["componentName"], @@ -1612,7 +1616,7 @@ "documentation":"

The service spec that the component uses to access service inputs. Provided when a component is attached to a service instance.

" } }, - "documentation":"

Detailed data of an Proton component resource.

For more information about components, see Proton components in the Proton Administrator Guide.

" + "documentation":"

Detailed data of an Proton component resource.

For more information about components, see Proton components in the Proton User Guide.

" }, "ComponentArn":{"type":"string"}, "ComponentDeploymentUpdateType":{ @@ -1678,7 +1682,7 @@ "documentation":"

The name of the service that serviceInstanceName is associated with. Provided when a component is attached to a service instance.

" } }, - "documentation":"

Summary data of an Proton component resource.

For more information about components, see Proton components in the Proton Administrator Guide.

" + "documentation":"

Summary data of an Proton component resource.

For more information about components, see Proton components in the Proton User Guide.

" }, "ComponentSummaryList":{ "type":"list", @@ -1731,7 +1735,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

An optional list of metadata items that you can associate with the Proton component. A tag is a key-value pair.

For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide.

" + "documentation":"

An optional list of metadata items that you can associate with the Proton component. A tag is a key-value pair.

For more information, see Proton resources and tagging in the Proton User Guide.

" }, "templateFile":{ "shape":"TemplateFileContents", @@ -1764,7 +1768,7 @@ }, "componentRoleArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in the associated environment account. It determines the scope of infrastructure that a component can provision in the account.

You must specify componentRoleArn to allow directly defined components to be associated with any environments running in this account.

For more information about components, see Proton components in the Proton Administrator Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in the associated environment account. It determines the scope of infrastructure that a component can provision in the account.

You must specify componentRoleArn to allow directly defined components to be associated with any environments running in this account.

For more information about components, see Proton components in the Proton User Guide.

" }, "environmentName":{ "shape":"ResourceName", @@ -1780,7 +1784,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

An optional list of metadata items that you can associate with the Proton environment account connection. A tag is a key-value pair.

For more information, see Proton resources and tagging in the Proton Administrator Guide.

" + "documentation":"

An optional list of metadata items that you can associate with the Proton environment account connection. A tag is a key-value pair.

For more information, see Proton resources and tagging in the Proton User Guide.

" } } }, @@ -1805,7 +1809,7 @@ "members":{ "componentRoleArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in this environment. It determines the scope of infrastructure that a component can provision.

You must specify componentRoleArn to allow directly defined components to be associated with this environment.

For more information about components, see Proton components in the Proton Administrator Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in this environment. It determines the scope of infrastructure that a component can provision.

You must specify componentRoleArn to allow directly defined components to be associated with this environment.

For more information about components, see Proton components in the Proton User Guide.

" }, "description":{ "shape":"Description", @@ -1813,7 +1817,7 @@ }, "environmentAccountConnectionId":{ "shape":"EnvironmentAccountConnectionId", - "documentation":"

The ID of the environment account connection that you provide if you're provisioning your environment infrastructure resources to an environment account. For more information, see Environment account connections in the Proton Administrator guide.

To use Amazon Web Services-managed provisioning for the environment, specify either the environmentAccountConnectionId or protonServiceRoleArn parameter and omit the provisioningRepository parameter.

" + "documentation":"

The ID of the environment account connection that you provide if you're provisioning your environment infrastructure resources to an environment account. For more information, see Environment account connections in the Proton User guide.

To use Amazon Web Services-managed provisioning for the environment, specify either the environmentAccountConnectionId or protonServiceRoleArn parameter and omit the provisioningRepository parameter.

" }, "name":{ "shape":"ResourceName", @@ -1825,15 +1829,15 @@ }, "provisioningRepository":{ "shape":"RepositoryBranchInput", - "documentation":"

The infrastructure repository that you use to host your rendered infrastructure templates for self-managed provisioning.

To use self-managed provisioning for the environment, specify this parameter and omit the environmentAccountConnectionId and protonServiceRoleArn parameters.

" + "documentation":"

The linked repository that you use to host your rendered infrastructure templates for self-managed provisioning. A linked repository is a repository that has been registered with Proton. For more information, see CreateRepository.

To use self-managed provisioning for the environment, specify this parameter and omit the environmentAccountConnectionId and protonServiceRoleArn parameters.
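As a hedged sketch of the self-managed provisioning path described here (the template name, repository values, and spec contents are hypothetical placeholders; the spec must match your template's schema):

    import boto3

    proton = boto3.client("proton")

    proton.create_environment(
        name="my-environment",
        templateName="my-environment-template",   # hypothetical template
        templateMajorVersion="1",
        spec="proton: EnvironmentSpec\nspec:\n  my_input: example\n",  # YAML string
        # Self-managed provisioning: supply a linked repository and omit
        # environmentAccountConnectionId and protonServiceRoleArn.
        provisioningRepository={
            "provider": "GITHUB",
            "name": "myrepos/myrepo",
            "branch": "main",
        },
    )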

" }, "spec":{ "shape":"SpecContents", - "documentation":"

A YAML formatted string that provides inputs as defined in the environment template bundle schema file. For more information, see Environments in the Proton Administrator Guide.

" + "documentation":"

A YAML formatted string that provides inputs as defined in the environment template bundle schema file. For more information, see Environments in the Proton User Guide.

" }, "tags":{ "shape":"TagList", - "documentation":"

An optional list of metadata items that you can associate with the Proton environment. A tag is a key-value pair.

For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide.

" + "documentation":"

An optional list of metadata items that you can associate with the Proton environment. A tag is a key-value pair.

For more information, see Proton resources and tagging in the Proton User Guide.

" }, "templateMajorVersion":{ "shape":"TemplateVersionPart", @@ -1845,7 +1849,7 @@ }, "templateName":{ "shape":"ResourceName", - "documentation":"

The name of the environment template. For more information, see Environment Templates in the Proton Administrator Guide.

" + "documentation":"

The name of the environment template. For more information, see Environment Templates in the Proton User Guide.

" } } }, @@ -1885,7 +1889,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

An optional list of metadata items that you can associate with the Proton environment template. A tag is a key-value pair.

For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide.

" + "documentation":"

An optional list of metadata items that you can associate with the Proton environment template. A tag is a key-value pair.

For more information, see Proton resources and tagging in the Proton User Guide.

" } } }, @@ -1925,7 +1929,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

An optional list of metadata items that you can associate with the Proton environment template version. A tag is a key-value pair.

For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide.

" + "documentation":"

An optional list of metadata items that you can associate with the Proton environment template version. A tag is a key-value pair.

For more information, see Proton resources and tagging in the Proton User Guide.

" }, "templateName":{ "shape":"ResourceName", @@ -1953,7 +1957,7 @@ "members":{ "connectionArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of your Amazon Web Services CodeStar connection. For more information, see Setting up for Proton in the Proton Administrator Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of your AWS CodeStar connection that connects Proton to your repository provider account. For more information, see Setting up for Proton in the Proton User Guide.

" }, "encryptionKey":{ "shape":"Arn", @@ -1969,7 +1973,7 @@ }, "tags":{ "shape":"TagList", - "documentation":"

An optional list of metadata items that you can associate with the Proton repository. A tag is a key-value pair.

For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide.

" + "documentation":"

An optional list of metadata items that you can associate with the Proton repository. A tag is a key-value pair.

For more information, see Proton resources and tagging in the Proton User Guide.

" } } }, @@ -1979,7 +1983,7 @@ "members":{ "repository":{ "shape":"Repository", - "documentation":"

The repository detail data that's returned by Proton.

" + "documentation":"

The repository link's detail data that's returned by Proton.

" } } }, @@ -2006,7 +2010,7 @@ }, "repositoryConnectionArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the repository connection. For more information, see Set up repository connection in the Proton Administrator Guide and Setting up with Proton in the Proton User Guide. Don't include this parameter if your service template doesn't include a service pipeline.

" + "documentation":"

The Amazon Resource Name (ARN) of the repository connection. For more information, see Setting up an AWS CodeStar connection in the Proton User Guide. Don't include this parameter if your service template doesn't include a service pipeline.

" }, "repositoryId":{ "shape":"RepositoryId", @@ -2014,11 +2018,11 @@ }, "spec":{ "shape":"SpecContents", - "documentation":"

A link to a spec file that provides inputs as defined in the service template bundle schema file. The spec file is in YAML format. Don’t include pipeline inputs in the spec if your service template doesn’t include a service pipeline. For more information, see Create a service in the Proton Administrator Guide and Create a service in the Proton User Guide.

" + "documentation":"

A link to a spec file that provides inputs as defined in the service template bundle schema file. The spec file is in YAML format. Don’t include pipeline inputs in the spec if your service template doesn’t include a service pipeline. For more information, see Create a service in the Proton User Guide.

" }, "tags":{ "shape":"TagList", - "documentation":"

An optional list of metadata items that you can associate with the Proton service. A tag is a key-value pair.

For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide.

" + "documentation":"

An optional list of metadata items that you can associate with the Proton service. A tag is a key-value pair.

For more information, see Proton resources and tagging in the Proton User Guide.

" }, "templateMajorVersion":{ "shape":"TemplateVersionPart", @@ -2066,11 +2070,11 @@ }, "pipelineProvisioning":{ "shape":"Provisioning", - "documentation":"

By default, Proton provides a service pipeline for your service. When this parameter is included, it indicates that an Proton service pipeline isn't provided for your service. After it's included, it can't be changed. For more information, see Service template bundles in the Proton Administrator Guide.

" + "documentation":"

By default, Proton provides a service pipeline for your service. When this parameter is included, it indicates that an Proton service pipeline isn't provided for your service. After it's included, it can't be changed. For more information, see Template bundles in the Proton User Guide.

" }, "tags":{ "shape":"TagList", - "documentation":"

An optional list of metadata items that you can associate with the Proton service template. A tag is a key-value pair.

For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide.

" + "documentation":"

An optional list of metadata items that you can associate with the Proton service template. A tag is a key-value pair.

For more information, see Proton resources and tagging in the Proton User Guide.

" } } }, @@ -2115,11 +2119,11 @@ }, "supportedComponentSources":{ "shape":"ServiceTemplateSupportedComponentSourceInputList", - "documentation":"

An array of supported component sources. Components with supported sources can be attached to service instances based on this service template version.

For more information about components, see Proton components in the Proton Administrator Guide.

" + "documentation":"

An array of supported component sources. Components with supported sources can be attached to service instances based on this service template version.

For more information about components, see Proton components in the Proton User Guide.

" }, "tags":{ "shape":"TagList", - "documentation":"

An optional list of metadata items that you can associate with the Proton service template version. A tag is a key-value pair.

For more information, see Proton resources and tagging in the Proton Administrator Guide or Proton User Guide.

" + "documentation":"

An optional list of metadata items that you can associate with the Proton service template version. A tag is a key-value pair.

For more information, see Proton resources and tagging in the Proton User Guide.

" }, "templateName":{ "shape":"ResourceName", @@ -2149,11 +2153,11 @@ "members":{ "branch":{ "shape":"GitBranchName", - "documentation":"

The branch of the registered repository for your template.

" + "documentation":"

The repository branch for your template.

" }, "repositoryName":{ "shape":"RepositoryName", - "documentation":"

The name of your repository (for example, myrepos/myrepo).

" + "documentation":"

The repository name (for example, myrepos/myrepo).

" }, "repositoryProvider":{ "shape":"RepositoryProvider", @@ -2298,7 +2302,7 @@ "members":{ "name":{ "shape":"RepositoryName", - "documentation":"

The name of the repository.

" + "documentation":"

The repository name.

" }, "provider":{ "shape":"RepositoryProvider", @@ -2311,7 +2315,7 @@ "members":{ "repository":{ "shape":"Repository", - "documentation":"

The repository detail data that's returned by Proton.

" + "documentation":"

The deleted repository link's detail data that's returned by Proton.

" } } }, @@ -2473,7 +2477,7 @@ }, "componentRoleArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in this environment. It determines the scope of infrastructure that a component can provision.

The environment must have a componentRoleArn to allow directly defined components to be associated with the environment.

For more information about components, see Proton components in the Proton Administrator Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in this environment. It determines the scope of infrastructure that a component can provision.

The environment must have a componentRoleArn to allow directly defined components to be associated with the environment.

For more information about components, see Proton components in the Proton User Guide.

" }, "createdAt":{ "shape":"Timestamp", @@ -2521,7 +2525,7 @@ }, "provisioningRepository":{ "shape":"RepositoryBranch", - "documentation":"

The infrastructure repository that you use to host your rendered infrastructure templates for self-managed provisioning.

" + "documentation":"

The linked repository that you use to host your rendered infrastructure templates for self-managed provisioning. A linked repository is a repository that has been registered with Proton. For more information, see CreateRepository.

" }, "spec":{ "shape":"SpecContents", @@ -2562,7 +2566,7 @@ }, "componentRoleArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in the associated environment account. It determines the scope of infrastructure that a component can provision in the account.

The environment account connection must have a componentRoleArn to allow directly defined components to be associated with any environments running in the account.

For more information about components, see Proton components in the Proton Administrator Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in the associated environment account. It determines the scope of infrastructure that a component can provision in the account.

The environment account connection must have a componentRoleArn to allow directly defined components to be associated with any environments running in the account.

For more information about components, see Proton components in the Proton User Guide.

" }, "environmentAccountId":{ "shape":"AwsAccountId", @@ -2643,7 +2647,7 @@ }, "componentRoleArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in the associated environment account. It determines the scope of infrastructure that a component can provision in the account.

The environment account connection must have a componentRoleArn to allow directly defined components to be associated with any environments running in the account.

For more information about components, see Proton components in the Proton Administrator Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in the associated environment account. It determines the scope of infrastructure that a component can provision in the account.

The environment account connection must have a componentRoleArn to allow directly defined components to be associated with any environments running in the account.

For more information about components, see Proton components in the Proton User Guide.

" }, "environmentAccountId":{ "shape":"AwsAccountId", @@ -2705,7 +2709,7 @@ }, "componentRoleArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in this environment. It determines the scope of infrastructure that a component can provision.

The environment must have a componentRoleArn to allow directly defined components to be associated with the environment.

For more information about components, see Proton components in the Proton Administrator Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in this environment. It determines the scope of infrastructure that a component can provision.

The environment must have a componentRoleArn to allow directly defined components to be associated with the environment.

For more information about components, see Proton components in the Proton User Guide.

" }, "createdAt":{ "shape":"Timestamp", @@ -3129,7 +3133,7 @@ }, "templateName":{ "shape":"ResourceName", - "documentation":"

The name of the environment template a version of which you want to get detailed data for..

" + "documentation":"

The name of the environment template a version of which you want to get detailed data for.

" } } }, @@ -3166,7 +3170,7 @@ "members":{ "repository":{ "shape":"Repository", - "documentation":"

The repository detail data that's returned by Proton.

" + "documentation":"

The repository link's detail data that's returned by Proton.

" } } }, @@ -3693,7 +3697,7 @@ }, "repositories":{ "shape":"RepositorySummaryList", - "documentation":"

An array of repositories.

" + "documentation":"

An array of repository links.

" } } }, @@ -4120,7 +4124,7 @@ }, "provisioningEngine":{ "shape":"ProvisionedResourceEngine", - "documentation":"

The resource provisioning engine. At this time, CLOUDFORMATION can be used for Amazon Web Services-managed provisioning, and TERRAFORM can be used for self-managed provisioning.

For more information, see Self-managed provisioning in the Proton Administrator Guide.

" + "documentation":"

The resource provisioning engine. At this time, CLOUDFORMATION can be used for Amazon Web Services-managed provisioning, and TERRAFORM can be used for self-managed provisioning.

For more information, see Self-managed provisioning in the Proton User Guide.

" } }, "documentation":"

Detail data for a provisioned resource.

" @@ -4182,11 +4186,11 @@ "members":{ "arn":{ "shape":"RepositoryArn", - "documentation":"

The repository Amazon Resource Name (ARN).

" + "documentation":"

The Amazon Resource Name (ARN) of the linked repository.

" }, "connectionArn":{ "shape":"Arn", - "documentation":"

The repository Amazon Web Services CodeStar connection that connects Proton to your repository.

" + "documentation":"

The Amazon Resource Name (ARN) of your AWS CodeStar connection that connects Proton to your repository provider account.

" }, "encryptionKey":{ "shape":"Arn", @@ -4201,7 +4205,7 @@ "documentation":"

The repository provider.

" } }, - "documentation":"

Detailed data of a repository that has been registered with Proton.

" + "documentation":"

Detailed data of a linked repository—a repository that has been registered with Proton.

" }, "RepositoryArn":{"type":"string"}, "RepositoryBranch":{ @@ -4215,7 +4219,7 @@ "members":{ "arn":{ "shape":"RepositoryArn", - "documentation":"

The Amazon Resource Name (ARN) of the repository branch.

" + "documentation":"

The Amazon Resource Name (ARN) of the linked repository.

" }, "branch":{ "shape":"GitBranchName", @@ -4230,7 +4234,7 @@ "documentation":"

The repository provider.

" } }, - "documentation":"

Detail data for a repository branch.

" + "documentation":"

Detail data for a linked repository branch.

" }, "RepositoryBranchInput":{ "type":"structure", @@ -4253,7 +4257,7 @@ "documentation":"

The repository provider.

" } }, - "documentation":"

Detail input data for a repository branch.

" + "documentation":"

Detail input data for a linked repository branch.

" }, "RepositoryId":{ "type":"string", @@ -4284,7 +4288,7 @@ "members":{ "arn":{ "shape":"RepositoryArn", - "documentation":"

The Amazon Resource Name (ARN) for a repository.

" + "documentation":"

The Amazon Resource Name (ARN) of the linked repository.

" }, "name":{ "shape":"RepositoryName", @@ -4295,7 +4299,7 @@ "documentation":"

The repository provider.

" } }, - "documentation":"

Summary data of a repository that has been registered with Proton.

" + "documentation":"

Summary data of a linked repository—a repository that has been registered with Proton.

" }, "RepositorySummaryList":{ "type":"list", @@ -4350,7 +4354,7 @@ "documentation":"

The resource that is synced to.

" } }, - "documentation":"

The repository sync definition.

" + "documentation":"

A repository sync definition.

" }, "RepositorySyncDefinitionList":{ "type":"list", @@ -4614,7 +4618,7 @@ }, "repositoryConnectionArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the repository connection. For more information, see Set up a repository connection in the Proton Administrator Guide and Setting up with Proton in the Proton User Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the repository connection. For more information, see Setting up an AWS CodeStar connection in the Proton User Guide.

" }, "repositoryId":{ "shape":"RepositoryId", @@ -4845,7 +4849,7 @@ "members":{ "message":{"shape":"ErrorMessage"} }, - "documentation":"

A quota was exceeded. For more information, see Proton Quotas in the Proton Administrator Guide.

", + "documentation":"

A quota was exceeded. For more information, see Proton Quotas in the Proton User Guide.

", "exception":true }, "ServiceStatus":{ @@ -5081,7 +5085,7 @@ }, "supportedComponentSources":{ "shape":"ServiceTemplateSupportedComponentSourceInputList", - "documentation":"

An array of supported component sources. Components with supported sources can be attached to service instances based on this service template version.

For more information about components, see Proton components in the Proton Administrator Guide.

" + "documentation":"

An array of supported component sources. Components with supported sources can be attached to service instances based on this service template version.

For more information about components, see Proton components in the Proton User Guide.

" }, "templateName":{ "shape":"ResourceName", @@ -5266,7 +5270,7 @@ }, "repositoryName":{ "shape":"RepositoryName", - "documentation":"

The name of the repository, for example myrepos/myrepo.

" + "documentation":"

The repository name (for example, myrepos/myrepo).

" }, "repositoryProvider":{ "shape":"RepositoryProvider", @@ -5356,13 +5360,17 @@ "UpdateAccountSettingsInput":{ "type":"structure", "members":{ + "deletePipelineProvisioningRepository":{ + "shape":"Boolean", + "documentation":"

Set to true to remove a configured pipeline repository from the account settings. Don't set this field if you are updating the configured pipeline repository.

" + }, "pipelineProvisioningRepository":{ "shape":"RepositoryBranchInput", - "documentation":"

A repository for pipeline provisioning. Specify it if you have environments configured for self-managed provisioning with services that include pipelines.

" + "documentation":"

A linked repository for pipeline provisioning. Specify it if you have environments configured for self-managed provisioning with services that include pipelines. A linked repository is a repository that has been registered with Proton. For more information, see CreateRepository.

To remove a previously configured repository, set deletePipelineProvisioningRepository to true, and don't set pipelineProvisioningRepository.

" }, "pipelineServiceRoleArn":{ "shape":"PipelineRoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the service role you want to use for provisioning pipelines. Assumed by Proton for Amazon Web Services-managed provisioning, and by customer-owned automation for self-managed provisioning.

" + "documentation":"

The Amazon Resource Name (ARN) of the service role you want to use for provisioning pipelines. Assumed by Proton for Amazon Web Services-managed provisioning, and by customer-owned automation for self-managed provisioning.

To remove a previously configured ARN, specify an empty string.
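To illustrate the two removal paths described above, here is a minimal boto3 sketch (it assumes a botocore build that already includes this updated Proton model; the role ARN and repository values are placeholders):

    import boto3

    proton = boto3.client("proton")

    # Clear the configured pipeline repository without touching the role ARN.
    proton.update_account_settings(deletePipelineProvisioningRepository=True)

    # Clear a previously configured pipeline service role by passing an empty string.
    proton.update_account_settings(pipelineServiceRoleArn="")

    # Or (re)configure both in one call.
    proton.update_account_settings(
        pipelineServiceRoleArn="arn:aws:iam::111122223333:role/example-proton-pipeline-role",  # placeholder
        pipelineProvisioningRepository={
            "provider": "GITHUB",
            "name": "myrepos/myrepo",   # placeholder
            "branch": "main",
        },
    )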

" } } }, @@ -5429,7 +5437,7 @@ "members":{ "componentRoleArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in the associated environment account. It determines the scope of infrastructure that a component can provision in the account.

The environment account connection must have a componentRoleArn to allow directly defined components to be associated with any environments running in the account.

For more information about components, see Proton components in the Proton Administrator Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in the associated environment account. It determines the scope of infrastructure that a component can provision in the account.

The environment account connection must have a componentRoleArn to allow directly defined components to be associated with any environments running in the account.

For more information about components, see Proton components in the Proton User Guide.

" }, "id":{ "shape":"EnvironmentAccountConnectionId", @@ -5460,7 +5468,7 @@ "members":{ "componentRoleArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in this environment. It determines the scope of infrastructure that a component can provision.

The environment must have a componentRoleArn to allow directly defined components to be associated with the environment.

For more information about components, see Proton components in the Proton Administrator Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM service role that Proton uses when provisioning directly defined components in this environment. It determines the scope of infrastructure that a component can provision.

The environment must have a componentRoleArn to allow directly defined components to be associated with the environment.

For more information about components, see Proton components in the Proton User Guide.

" }, "deploymentType":{ "shape":"DeploymentUpdateType", @@ -5484,7 +5492,7 @@ }, "provisioningRepository":{ "shape":"RepositoryBranchInput", - "documentation":"

The infrastructure repository that you use to host your rendered infrastructure templates for self-managed provisioning.

" + "documentation":"

The linked repository that you use to host your rendered infrastructure templates for self-managed provisioning. A linked repository is a repository that has been registered with Proton. For more information, see CreateRepository.

" }, "spec":{ "shape":"SpecContents", @@ -5592,7 +5600,7 @@ }, "spec":{ "shape":"SpecContents", - "documentation":"

Lists the service instances to add and the existing service instances to remain. Omit the existing service instances to delete from the list. Don't include edits to the existing service instances or pipeline. For more information, see Edit a service in the Proton Administrator Guide or the Proton User Guide.

" + "documentation":"

Lists the service instances to add and the existing service instances to remain. Omit the existing service instances to delete from the list. Don't include edits to the existing service instances or pipeline. For more information, see Edit a service in the Proton User Guide.

" } } }, @@ -5748,7 +5756,7 @@ }, "supportedComponentSources":{ "shape":"ServiceTemplateSupportedComponentSourceInputList", - "documentation":"

An array of supported component sources. Components with supported sources can be attached to service instances based on this service template version.

A change to supportedComponentSources doesn't impact existing component attachments to instances based on this template version. A change only affects later associations.

For more information about components, see Proton components in the Proton Administrator Guide.

" + "documentation":"

An array of supported component sources. Components with supported sources can be attached to service instances based on this service template version.

A change to supportedComponentSources doesn't impact existing component attachments to instances based on this template version. A change only affects later associations.

For more information about components, see Proton components in the Proton User Guide.

" }, "templateName":{ "shape":"ResourceName", @@ -5778,11 +5786,11 @@ "members":{ "branch":{ "shape":"GitBranchName", - "documentation":"

The repository branch.

" + "documentation":"

The repository branch for your template.

" }, "repositoryName":{ "shape":"RepositoryName", - "documentation":"

The name of the repository (for example, myrepos/myrepo).

" + "documentation":"

The repository name (for example, myrepos/myrepo).

" }, "repositoryProvider":{ "shape":"RepositoryProvider", @@ -5821,5 +5829,5 @@ "exception":true } }, - "documentation":"

This is the Proton Service API Reference. It provides descriptions, syntax and usage examples for each of the actions and data types for the Proton service.

The documentation for each action shows the Query API request parameters and the XML response.

Alternatively, you can use the Amazon Web Services CLI to access an API. For more information, see the Amazon Web Services Command Line Interface User Guide.

The Proton service is a two-pronged automation framework. Administrators create service templates to provide standardized infrastructure and deployment tooling for serverless and container based applications. Developers, in turn, select from the available service templates to automate their application or service deployments.

Because administrators define the infrastructure and tooling that Proton deploys and manages, they need permissions to use all of the listed API operations.

When developers select a specific infrastructure and tooling set, Proton deploys their applications. To monitor their applications that are running on Proton, developers need permissions to the service create, list, update and delete API operations and the service instance list and update API operations.

To learn more about Proton administration, see the Proton Administrator Guide.

To learn more about deploying serverless and containerized applications on Proton, see the Proton User Guide.

Ensuring Idempotency

When you make a mutating API request, the request typically returns a result before the asynchronous workflows of the operation are complete. Operations might also time out or encounter other server issues before they're complete, even if the request already returned a result. This might make it difficult to determine whether the request succeeded. Moreover, you might need to retry the request multiple times to ensure that the operation completes successfully. However, if the original request and the subsequent retries are successful, the operation occurs multiple times. This means that you might create more resources than you intended.

Idempotency ensures that an API request action completes no more than one time. With an idempotent request, if the original request action completes successfully, any subsequent retries complete successfully without performing any further actions. However, the result might contain updated information, such as the current creation status.

The following lists of APIs are grouped according to methods that ensure idempotency.

Idempotent create APIs with a client token

The API actions in this list support idempotency with the use of a client token. The corresponding Amazon Web Services CLI commands also support idempotency using a client token. A client token is a unique, case-sensitive string of up to 64 ASCII characters. To make an idempotent API request using one of these actions, specify a client token in the request. We recommend that you don't reuse the same client token for other API requests. If you don’t provide a client token for these APIs, a default client token is automatically provided by SDKs.

Given a request action that has succeeded:

If you retry the request using the same client token and the same parameters, the retry succeeds without performing any further actions other than returning the original resource detail data in the response.

If you retry the request using the same client token, but one or more of the parameters are different, the retry throws a ValidationException with an IdempotentParameterMismatch error.

Client tokens expire eight hours after a request is made. If you retry the request with the expired token, a new resource is created.

If the original resource is deleted and you retry the request, a new resource is created.

Idempotent create APIs with a client token:

Idempotent create APIs

Given a request action that has succeeded:

If you retry the request with an API from this group, and the original resource hasn't been modified, the retry succeeds without performing any further actions other than returning the original resource detail data in the response.

If the original resource has been modified, the retry throws a ConflictException.

If you retry with different input parameters, the retry throws a ValidationException with an IdempotentParameterMismatch error.

Idempotent create APIs:

Idempotent delete APIs

Given a request action that has succeeded:

When you retry the request with an API from this group and the resource was deleted, its metadata is returned in the response.

If you retry and the resource doesn't exist, the response is empty.

In both cases, the retry succeeds.

Idempotent delete APIs:

Asynchronous idempotent delete APIs

Given a request action that has succeeded:

If you retry the request with an API from this group, if the original request delete operation status is DELETE_IN_PROGRESS, the retry returns the resource detail data in the response without performing any further actions.

If the original request delete operation is complete, a retry returns an empty response.

Asynchronous idempotent delete APIs:

" + "documentation":"

This is the Proton Service API Reference. It provides descriptions, syntax and usage examples for each of the actions and data types for the Proton service.

The documentation for each action shows the Query API request parameters and the XML response.

Alternatively, you can use the Amazon Web Services CLI to access an API. For more information, see the Amazon Web Services Command Line Interface User Guide.

The Proton service is a two-pronged automation framework. Administrators create service templates to provide standardized infrastructure and deployment tooling for serverless and container based applications. Developers, in turn, select from the available service templates to automate their application or service deployments.

Because administrators define the infrastructure and tooling that Proton deploys and manages, they need permissions to use all of the listed API operations.

When developers select a specific infrastructure and tooling set, Proton deploys their applications. To monitor their applications that are running on Proton, developers need permissions to the service create, list, update and delete API operations and the service instance list and update API operations.

To learn more about Proton, see the Proton User Guide.

Ensuring Idempotency

When you make a mutating API request, the request typically returns a result before the asynchronous workflows of the operation are complete. Operations might also time out or encounter other server issues before they're complete, even if the request already returned a result. This might make it difficult to determine whether the request succeeded. Moreover, you might need to retry the request multiple times to ensure that the operation completes successfully. However, if the original request and the subsequent retries are successful, the operation occurs multiple times. This means that you might create more resources than you intended.

Idempotency ensures that an API request action completes no more than one time. With an idempotent request, if the original request action completes successfully, any subsequent retries complete successfully without performing any further actions. However, the result might contain updated information, such as the current creation status.

The following lists of APIs are grouped according to methods that ensure idempotency.

Idempotent create APIs with a client token

The API actions in this list support idempotency with the use of a client token. The corresponding Amazon Web Services CLI commands also support idempotency using a client token. A client token is a unique, case-sensitive string of up to 64 ASCII characters. To make an idempotent API request using one of these actions, specify a client token in the request. We recommend that you don't reuse the same client token for other API requests. If you don’t provide a client token for these APIs, a default client token is automatically provided by SDKs.

Given a request action that has succeeded:

If you retry the request using the same client token and the same parameters, the retry succeeds without performing any further actions other than returning the original resource detail data in the response.

If you retry the request using the same client token, but one or more of the parameters are different, the retry throws a ValidationException with an IdempotentParameterMismatch error.

Client tokens expire eight hours after a request is made. If you retry the request with the expired token, a new resource is created.

If the original resource is deleted and you retry the request, a new resource is created.
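As a hedged sketch of the client-token behavior described above (all parameter values are placeholders, and CreateEnvironmentAccountConnection is used here only as one example of a Proton create call that accepts a clientToken):

    import uuid
    import boto3

    proton = boto3.client("proton")
    token = str(uuid.uuid4())  # keep and reuse this token when retrying the request

    params = {
        "clientToken": token,
        "environmentName": "example-env",                     # placeholder
        "managementAccountId": "111122223333",                # placeholder
        "roleArn": "arn:aws:iam::444455556666:role/example",  # placeholder
    }

    first = proton.create_environment_account_connection(**params)
    # A retry with the same token and the same parameters returns the original
    # resource detail data instead of creating a second connection.
    retry = proton.create_environment_account_connection(**params)
    assert retry["environmentAccountConnection"]["id"] == first["environmentAccountConnection"]["id"]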

Idempotent create APIs with a client token:

Idempotent create APIs

Given a request action that has succeeded:

If you retry the request with an API from this group, and the original resource hasn't been modified, the retry succeeds without performing any further actions other than returning the original resource detail data in the response.

If the original resource has been modified, the retry throws a ConflictException.

If you retry with different input parameters, the retry throws a ValidationException with an IdempotentParameterMismatch error.

Idempotent create APIs:

Idempotent delete APIs

Given a request action that has succeeded:

When you retry the request with an API from this group and the resource was deleted, its metadata is returned in the response.

If you retry and the resource doesn't exist, the response is empty.

In both cases, the retry succeeds.

Idempotent delete APIs:

Asynchronous idempotent delete APIs

Given a request action that has succeeded:

If you retry the request with an API from this group, if the original request delete operation status is DELETE_IN_PROGRESS, the retry returns the resource detail data in the response without performing any further actions.

If the original request delete operation is complete, a retry returns an empty response.

Asynchronous idempotent delete APIs:

" } diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index 0a2cc64ae1..dd1c70890a 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -3097,7 +3097,7 @@ "errors":[ {"shape":"ResourceNotFound"} ], - "documentation":"

Update a model training job to request a new Debugger profiling configuration.

" + "documentation":"

Update a model training job to request a new Debugger profiling configuration or to change warm pool retention length.

" }, "UpdateTrial":{ "name":"UpdateTrial", @@ -12068,6 +12068,10 @@ "Environment":{ "shape":"TrainingEnvironmentMap", "documentation":"

The environment variables to set in the Docker container.

" + }, + "WarmPoolStatus":{ + "shape":"WarmPoolStatus", + "documentation":"

The status of the warm pool associated with the training job.

" } } }, @@ -16126,6 +16130,11 @@ }, "documentation":"

The JupyterServer app settings.

" }, + "KeepAlivePeriodInSeconds":{ + "type":"integer", + "max":3600, + "min":0 + }, "KernelDisplayName":{ "type":"string", "max":1024 @@ -19358,6 +19367,10 @@ "SortOrder":{ "shape":"SortOrder", "documentation":"

The sort order for results. The default is Ascending.

" + }, + "WarmPoolStatusEquals":{ + "shape":"WarmPoolResourceStatus", + "documentation":"

A filter that retrieves only training jobs with a specific warm pool status.
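A minimal boto3 sketch of this filter (assuming a botocore build that includes this updated SageMaker model):

    import boto3

    sm = boto3.client("sagemaker")

    # List only training jobs whose warm pool is still available for reuse.
    paginator = sm.get_paginator("list_training_jobs")
    for page in paginator.paginate(WarmPoolStatusEquals="Available"):
        for job in page["TrainingJobSummaries"]:
            print(job["TrainingJobName"], job.get("WarmPoolStatus", {}).get("Status"))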

" } } }, @@ -24471,10 +24484,25 @@ "InstanceGroups":{ "shape":"InstanceGroups", "documentation":"

The configuration of a heterogeneous cluster in JSON format.

" + }, + "KeepAlivePeriodInSeconds":{ + "shape":"KeepAlivePeriodInSeconds", + "documentation":"

The duration of time in seconds to retain configured resources in a warm pool for subsequent training jobs.

" } }, "documentation":"

Describes the resources, including ML compute instances and ML storage volumes, to use for model training.
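For example, a hedged sketch of a CreateTrainingJob call that opts into a warm pool by setting the KeepAlivePeriodInSeconds member described above (the job name, image URI, role ARN, and S3 path are placeholders):

    import boto3

    sm = boto3.client("sagemaker")

    sm.create_training_job(
        TrainingJobName="example-warm-pool-job",  # placeholder
        AlgorithmSpecification={
            "TrainingImage": "111122223333.dkr.ecr.us-east-1.amazonaws.com/example:latest",  # placeholder
            "TrainingInputMode": "File",
        },
        RoleArn="arn:aws:iam::111122223333:role/example-sagemaker-role",   # placeholder
        OutputDataConfig={"S3OutputPath": "s3://example-bucket/output/"},  # placeholder
        ResourceConfig={
            "InstanceType": "ml.m5.xlarge",
            "InstanceCount": 1,
            "VolumeSizeInGB": 50,
            # Keep the provisioned instances warm for 30 minutes after the job ends.
            "KeepAlivePeriodInSeconds": 1800,
        },
        StoppingCondition={"MaxRuntimeInSeconds": 3600},
    )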

" }, + "ResourceConfigForUpdate":{ + "type":"structure", + "required":["KeepAlivePeriodInSeconds"], + "members":{ + "KeepAlivePeriodInSeconds":{ + "shape":"KeepAlivePeriodInSeconds", + "documentation":"

The KeepAlivePeriodInSeconds value specified in the ResourceConfig to update.

" + } + }, + "documentation":"

The ResourceConfig to update KeepAlivePeriodInSeconds. Other fields in the ResourceConfig cannot be updated.

" + }, "ResourceId":{ "type":"string", "max":32 @@ -24532,6 +24560,10 @@ "min":1, "pattern":".+" }, + "ResourceRetainedBillableTimeInSeconds":{ + "type":"integer", + "min":0 + }, "ResourceSpec":{ "type":"structure", "members":{ @@ -26424,6 +26456,10 @@ "TrainingJobStatus":{ "shape":"TrainingJobStatus", "documentation":"

The status of the training job.

" + }, + "WarmPoolStatus":{ + "shape":"WarmPoolStatus", + "documentation":"

The status of the warm pool associated with the training job.

" } }, "documentation":"

Provides summary information about a training job.

" @@ -28009,6 +28045,10 @@ "ProfilerRuleConfigurations":{ "shape":"ProfilerRuleConfigurations", "documentation":"

Configuration information for Debugger rules for profiling system and framework metrics.

" + }, + "ResourceConfig":{ + "shape":"ResourceConfigForUpdate", + "documentation":"

The training job ResourceConfig to update warm pool retention length.
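A minimal sketch of updating warm pool retention through UpdateTrainingJob (the job name is a placeholder; per the ResourceConfigForUpdate shape above, KeepAlivePeriodInSeconds is the only updatable ResourceConfig field, with a 0-3600 second range):

    import boto3

    sm = boto3.client("sagemaker")

    # Extend warm pool retention to the 3600-second maximum.
    sm.update_training_job(
        TrainingJobName="example-warm-pool-job",  # placeholder
        ResourceConfig={"KeepAlivePeriodInSeconds": 3600},
    )

    # Setting it to 0 releases the warm pool as soon as possible.
    sm.update_training_job(
        TrainingJobName="example-warm-pool-job",
        ResourceConfig={"KeepAlivePeriodInSeconds": 0},
    )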

" } } }, @@ -28447,6 +28487,34 @@ "max":3600, "min":0 }, + "WarmPoolResourceStatus":{ + "type":"string", + "enum":[ + "Available", + "Terminated", + "Reused", + "InUse" + ] + }, + "WarmPoolStatus":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{ + "shape":"WarmPoolResourceStatus", + "documentation":"

The status of the warm pool.

" + }, + "ResourceRetainedBillableTimeInSeconds":{ + "shape":"ResourceRetainedBillableTimeInSeconds", + "documentation":"

The billable time in seconds used by the warm pool. Billable time refers to the absolute wall-clock time.

Multiply ResourceRetainedBillableTimeInSeconds by the number of instances (InstanceCount) in your training cluster to get the total compute time SageMaker bills you if you run warm pool training. The formula is as follows: ResourceRetainedBillableTimeInSeconds * InstanceCount.
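A small sketch of that calculation against a DescribeTrainingJob response (the job name is a placeholder):

    import boto3

    sm = boto3.client("sagemaker")
    job = sm.describe_training_job(TrainingJobName="example-warm-pool-job")  # placeholder

    retained = job.get("WarmPoolStatus", {}).get("ResourceRetainedBillableTimeInSeconds", 0)
    instance_count = job["ResourceConfig"]["InstanceCount"]

    # Total billable warm pool time = retained seconds * number of instances.
    print(retained * instance_count)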

" + }, + "ReusedByJob":{ + "shape":"TrainingJobName", + "documentation":"

The name of the matching training job that reused the warm pool.

" + } + }, + "documentation":"

Status and billing information about the warm pool.

" + }, "Workforce":{ "type":"structure", "required":[ diff --git a/botocore/data/secretsmanager/2017-10-17/service-2.json b/botocore/data/secretsmanager/2017-10-17/service-2.json index 4ebb3e0637..c6d5877333 100644 --- a/botocore/data/secretsmanager/2017-10-17/service-2.json +++ b/botocore/data/secretsmanager/2017-10-17/service-2.json @@ -27,7 +27,7 @@ {"shape":"InternalServiceError"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Turns off automatic rotation, and if a rotation is currently in progress, cancels the rotation.

If you cancel a rotation in progress, it can leave the VersionStage labels in an unexpected state. You might need to remove the staging label AWSPENDING from the partially created version. You also need to determine whether to roll back to the previous version of the secret by moving the staging label AWSCURRENT to the version that has AWSPENDING. To determine which version has a specific staging label, call ListSecretVersionIds. Then use UpdateSecretVersionStage to change staging labels. For more information, see How rotation works.

To turn on automatic rotation again, call RotateSecret.

Required permissions: secretsmanager:CancelRotateSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Turns off automatic rotation, and if a rotation is currently in progress, cancels the rotation.

If you cancel a rotation in progress, it can leave the VersionStage labels in an unexpected state. You might need to remove the staging label AWSPENDING from the partially created version. You also need to determine whether to roll back to the previous version of the secret by moving the staging label AWSCURRENT to the version that has AWSPENDING. To determine which version has a specific staging label, call ListSecretVersionIds. Then use UpdateSecretVersionStage to change staging labels. For more information, see How rotation works.

To turn on automatic rotation again, call RotateSecret.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:CancelRotateSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
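A hedged sketch of the cleanup described above: cancel the rotation, then detach a leftover AWSPENDING label from a partially created version (the secret name is a placeholder):

    import boto3

    sm = boto3.client("secretsmanager")
    secret_id = "example/prod/db-credentials"  # placeholder

    sm.cancel_rotate_secret(SecretId=secret_id)

    # If a partially created version still carries AWSPENDING, remove the label.
    meta = sm.describe_secret(SecretId=secret_id)
    for version_id, stages in meta.get("VersionIdsToStages", {}).items():
        if "AWSPENDING" in stages and "AWSCURRENT" not in stages:
            sm.update_secret_version_stage(
                SecretId=secret_id,
                VersionStage="AWSPENDING",
                RemoveFromVersionId=version_id,
            )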

" }, "CreateSecret":{ "name":"CreateSecret", @@ -49,7 +49,7 @@ {"shape":"PreconditionNotMetException"}, {"shape":"DecryptionFailure"} ], - "documentation":"

Creates a new secret. A secret can be a password, a set of credentials such as a user name and password, an OAuth token, or other secret information that you store in an encrypted form in Secrets Manager. The secret also includes the connection information to access a database or other service, which Secrets Manager doesn't encrypt. A secret in Secrets Manager consists of both the protected secret data and the important information needed to manage the secret.

For information about creating a secret in the console, see Create a secret.

To create a secret, you can provide the secret value to be encrypted in either the SecretString parameter or the SecretBinary parameter, but not both. If you include SecretString or SecretBinary then Secrets Manager creates an initial secret version and automatically attaches the staging label AWSCURRENT to it.

For database credentials you want to rotate, for Secrets Manager to be able to rotate the secret, you must make sure the JSON you store in the SecretString matches the JSON structure of a database secret.

If you don't specify an KMS encryption key, Secrets Manager uses the Amazon Web Services managed key aws/secretsmanager. If this key doesn't already exist in your account, then Secrets Manager creates it for you automatically. All users and roles in the Amazon Web Services account automatically have access to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time significant delay in returning the result.

If the secret is in a different Amazon Web Services account from the credentials calling the API, then you can't use aws/secretsmanager to encrypt the secret, and you must create and use a customer managed KMS key.

Required permissions: secretsmanager:CreateSecret. If you include tags in the secret, you also need secretsmanager:TagResource. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

To encrypt the secret with a KMS key other than aws/secretsmanager, you need kms:GenerateDataKey and kms:Decrypt permission to the key.

" + "documentation":"

Creates a new secret. A secret can be a password, a set of credentials such as a user name and password, an OAuth token, or other secret information that you store in an encrypted form in Secrets Manager. The secret also includes the connection information to access a database or other service, which Secrets Manager doesn't encrypt. A secret in Secrets Manager consists of both the protected secret data and the important information needed to manage the secret.

For information about creating a secret in the console, see Create a secret.

To create a secret, you can provide the secret value to be encrypted in either the SecretString parameter or the SecretBinary parameter, but not both. If you include SecretString or SecretBinary then Secrets Manager creates an initial secret version and automatically attaches the staging label AWSCURRENT to it.

For database credentials you want to rotate, for Secrets Manager to be able to rotate the secret, you must make sure the JSON you store in the SecretString matches the JSON structure of a database secret.

If you don't specify a KMS encryption key, Secrets Manager uses the Amazon Web Services managed key aws/secretsmanager. If this key doesn't already exist in your account, then Secrets Manager creates it for you automatically. All users and roles in the Amazon Web Services account automatically have access to use aws/secretsmanager. Creating aws/secretsmanager can result in a one-time significant delay in returning the result.

If the secret is in a different Amazon Web Services account from the credentials calling the API, then you can't use aws/secretsmanager to encrypt the secret, and you must create and use a customer managed KMS key.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:CreateSecret. If you include tags in the secret, you also need secretsmanager:TagResource. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

To encrypt the secret with a KMS key other than aws/secretsmanager, you need kms:GenerateDataKey and kms:Decrypt permission to the key.
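A minimal boto3 sketch of creating a secret with a JSON SecretString (the name, credential values, and commented-out KMS key are placeholders):

    import json
    import boto3

    sm = boto3.client("secretsmanager")

    sm.create_secret(
        Name="example/prod/db-credentials",        # placeholder
        Description="Example database credentials",
        SecretString=json.dumps({"username": "admin", "password": "example-password"}),  # placeholder values
        # Omit KmsKeyId to use the aws/secretsmanager managed key, or pass a
        # customer managed key, for example for cross-account access:
        # KmsKeyId="arn:aws:kms:us-east-1:111122223333:key/EXAMPLE",  # placeholder
        Tags=[{"Key": "environment", "Value": "example"}],
    )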

" }, "DeleteResourcePolicy":{ "name":"DeleteResourcePolicy", @@ -65,7 +65,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Deletes the resource-based permission policy attached to the secret. To attach a policy to a secret, use PutResourcePolicy.

Required permissions: secretsmanager:DeleteResourcePolicy. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Deletes the resource-based permission policy attached to the secret. To attach a policy to a secret, use PutResourcePolicy.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:DeleteResourcePolicy. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" }, "DeleteSecret":{ "name":"DeleteSecret", @@ -81,7 +81,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Deletes a secret and all of its versions. You can specify a recovery window during which you can restore the secret. The minimum recovery window is 7 days. The default recovery window is 30 days. Secrets Manager attaches a DeletionDate stamp to the secret that specifies the end of the recovery window. At the end of the recovery window, Secrets Manager deletes the secret permanently.

You can't delete a primary secret that is replicated to other Regions. You must first delete the replicas using RemoveRegionsFromReplication, and then delete the primary secret. When you delete a replica, it is deleted immediately.

You can't directly delete a version of a secret. Instead, you remove all staging labels from the version using UpdateSecretVersionStage. This marks the version as deprecated, and then Secrets Manager can automatically delete the version in the background.

To determine whether an application still uses a secret, you can create an Amazon CloudWatch alarm to alert you to any attempts to access a secret during the recovery window. For more information, see Monitor secrets scheduled for deletion.

Secrets Manager performs the permanent secret deletion at the end of the waiting period as a background task with low priority. There is no guarantee of a specific time after the recovery window for the permanent delete to occur.

At any time before recovery window ends, you can use RestoreSecret to remove the DeletionDate and cancel the deletion of the secret.

When a secret is scheduled for deletion, you cannot retrieve the secret value. You must first cancel the deletion with RestoreSecret and then you can retrieve the secret.

Required permissions: secretsmanager:DeleteSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Deletes a secret and all of its versions. You can specify a recovery window during which you can restore the secret. The minimum recovery window is 7 days. The default recovery window is 30 days. Secrets Manager attaches a DeletionDate stamp to the secret that specifies the end of the recovery window. At the end of the recovery window, Secrets Manager deletes the secret permanently.

You can't delete a primary secret that is replicated to other Regions. You must first delete the replicas using RemoveRegionsFromReplication, and then delete the primary secret. When you delete a replica, it is deleted immediately.

You can't directly delete a version of a secret. Instead, you remove all staging labels from the version using UpdateSecretVersionStage. This marks the version as deprecated, and then Secrets Manager can automatically delete the version in the background.

To determine whether an application still uses a secret, you can create an Amazon CloudWatch alarm to alert you to any attempts to access a secret during the recovery window. For more information, see Monitor secrets scheduled for deletion.

Secrets Manager performs the permanent secret deletion at the end of the waiting period as a background task with low priority. There is no guarantee of a specific time after the recovery window for the permanent delete to occur.

At any time before recovery window ends, you can use RestoreSecret to remove the DeletionDate and cancel the deletion of the secret.

When a secret is scheduled for deletion, you cannot retrieve the secret value. You must first cancel the deletion with RestoreSecret and then you can retrieve the secret.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:DeleteSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
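A minimal sketch of scheduling a deletion with a custom recovery window and then cancelling it (the secret name is a placeholder):

    import boto3

    sm = boto3.client("secretsmanager")

    # Schedule deletion with the minimum 7-day recovery window.
    sm.delete_secret(SecretId="example/prod/db-credentials", RecoveryWindowInDays=7)

    # Any time before the window ends, the deletion can be cancelled.
    sm.restore_secret(SecretId="example/prod/db-credentials")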

" }, "DescribeSecret":{ "name":"DescribeSecret", @@ -96,7 +96,7 @@ {"shape":"InternalServiceError"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Retrieves the details of a secret. It does not include the encrypted secret value. Secrets Manager only returns fields that have a value in the response.

Required permissions: secretsmanager:DescribeSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Retrieves the details of a secret. It does not include the encrypted secret value. Secrets Manager only returns fields that have a value in the response.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:DescribeSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" }, "GetRandomPassword":{ "name":"GetRandomPassword", @@ -111,7 +111,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Generates a random password. We recommend that you specify the maximum length and include every character type that the system you are generating a password for can support.

Required permissions: secretsmanager:GetRandomPassword. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Generates a random password. We recommend that you specify the maximum length and include every character type that the system you are generating a password for can support.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:GetRandomPassword. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
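A minimal boto3 sketch of generating a 32-character password that excludes characters a downstream system might not accept (the excluded set is only an example):

    import boto3

    sm = boto3.client("secretsmanager")

    resp = sm.get_random_password(
        PasswordLength=32,
        ExcludeCharacters='"@/\\',      # example exclusions only
        RequireEachIncludedType=True,
    )
    password = resp["RandomPassword"]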

" }, "GetResourcePolicy":{ "name":"GetResourcePolicy", @@ -127,7 +127,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Retrieves the JSON text of the resource-based policy document attached to the secret. For more information about permissions policies attached to a secret, see Permissions policies attached to a secret.

Required permissions: secretsmanager:GetResourcePolicy. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Retrieves the JSON text of the resource-based policy document attached to the secret. For more information about permissions policies attached to a secret, see Permissions policies attached to a secret.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:GetResourcePolicy. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" }, "GetSecretValue":{ "name":"GetSecretValue", @@ -144,7 +144,7 @@ {"shape":"DecryptionFailure"}, {"shape":"InternalServiceError"} ], - "documentation":"

Retrieves the contents of the encrypted fields SecretString or SecretBinary from the specified version of a secret, whichever contains content.

We recommend that you cache your secret values by using client-side caching. Caching secrets improves speed and reduces your costs. For more information, see Cache secrets for your applications.

To retrieve the previous version of a secret, use VersionStage and specify AWSPREVIOUS. To revert to the previous version of a secret, call UpdateSecretVersionStage.

Required permissions: secretsmanager:GetSecretValue. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key aws/secretsmanager, then you also need kms:Decrypt permissions for that key. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Retrieves the contents of the encrypted fields SecretString or SecretBinary from the specified version of a secret, whichever contains content.

We recommend that you cache your secret values by using client-side caching. Caching secrets improves speed and reduces your costs. For more information, see Cache secrets for your applications.

To retrieve the previous version of a secret, use VersionStage and specify AWSPREVIOUS. To revert to the previous version of a secret, call UpdateSecretVersionStage.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:GetSecretValue. If the secret is encrypted using a customer-managed key instead of the Amazon Web Services managed key aws/secretsmanager, then you also need kms:Decrypt permissions for that key. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
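A minimal sketch of fetching the current value and then the previous version by staging label (the secret name is a placeholder):

    import boto3

    sm = boto3.client("secretsmanager")

    current = sm.get_secret_value(SecretId="example/prod/db-credentials")  # placeholder
    print(current["SecretString"])

    # Fetch the previous version using the AWSPREVIOUS staging label.
    previous = sm.get_secret_value(
        SecretId="example/prod/db-credentials",
        VersionStage="AWSPREVIOUS",
    )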

" }, "ListSecretVersionIds":{ "name":"ListSecretVersionIds", @@ -160,7 +160,7 @@ {"shape":"InternalServiceError"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Lists the versions of a secret. Secrets Manager uses staging labels to indicate the different versions of a secret. For more information, see Secrets Manager concepts: Versions.

To list the secrets in the account, use ListSecrets.

Required permissions: secretsmanager:ListSecretVersionIds. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Lists the versions of a secret. Secrets Manager uses staging labels to indicate the different versions of a secret. For more information, see Secrets Manager concepts: Versions.

To list the secrets in the account, use ListSecrets.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:ListSecretVersionIds. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" }, "ListSecrets":{ "name":"ListSecrets", @@ -175,7 +175,7 @@ {"shape":"InvalidNextTokenException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Lists the secrets that are stored by Secrets Manager in the Amazon Web Services account, not including secrets that are marked for deletion. To see secrets marked for deletion, use the Secrets Manager console.

ListSecrets is eventually consistent, however it might not reflect changes from the last five minutes. To get the latest information for a specific secret, use DescribeSecret.

To list the versions of a secret, use ListSecretVersionIds.

To get the secret value from SecretString or SecretBinary, call GetSecretValue.

For information about finding secrets in the console, see Find secrets in Secrets Manager.

Required permissions: secretsmanager:ListSecrets. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Lists the secrets that are stored by Secrets Manager in the Amazon Web Services account, not including secrets that are marked for deletion. To see secrets marked for deletion, use the Secrets Manager console.

ListSecrets is eventually consistent; however, it might not reflect changes from the last five minutes. To get the latest information for a specific secret, use DescribeSecret.

To list the versions of a secret, use ListSecretVersionIds.

To get the secret value from SecretString or SecretBinary, call GetSecretValue.

For information about finding secrets in the console, see Find secrets in Secrets Manager.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:ListSecrets. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" }, "PutResourcePolicy":{ "name":"PutResourcePolicy", @@ -193,7 +193,7 @@ {"shape":"InvalidRequestException"}, {"shape":"PublicPolicyException"} ], - "documentation":"

Attaches a resource-based permission policy to a secret. A resource-based policy is optional. For more information, see Authentication and access control for Secrets Manager

For information about attaching a policy in the console, see Attach a permissions policy to a secret.

Required permissions: secretsmanager:PutResourcePolicy. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Attaches a resource-based permission policy to a secret. A resource-based policy is optional. For more information, see Authentication and access control for Secrets Manager

For information about attaching a policy in the console, see Attach a permissions policy to a secret.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:PutResourcePolicy. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" }, "PutSecretValue":{ "name":"PutSecretValue", @@ -213,7 +213,7 @@ {"shape":"InternalServiceError"}, {"shape":"DecryptionFailure"} ], - "documentation":"

Creates a new version with a new encrypted secret value and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value.

We recommend you avoid calling PutSecretValue at a sustained rate of more than once every 10 minutes. When you update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you call PutSecretValue more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions.

You can specify the staging labels to attach to the new version in VersionStages. If you don't include VersionStages, then Secrets Manager automatically moves the staging label AWSCURRENT to this version. If this operation creates the first version for the secret, then Secrets Manager automatically attaches the staging label AWSCURRENT to it .

If this operation moves the staging label AWSCURRENT from another version to this version, then Secrets Manager also automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from.

This operation is idempotent. If you call this operation with a ClientRequestToken that matches an existing version's VersionId, and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you can't modify an existing version; you can only create new ones.

Required permissions: secretsmanager:PutSecretValue. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Creates a new version with a new encrypted secret value and attaches it to the secret. The version can contain a new SecretString value or a new SecretBinary value.

We recommend you avoid calling PutSecretValue at a sustained rate of more than once every 10 minutes. When you update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you call PutSecretValue more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions.

You can specify the staging labels to attach to the new version in VersionStages. If you don't include VersionStages, then Secrets Manager automatically moves the staging label AWSCURRENT to this version. If this operation creates the first version for the secret, then Secrets Manager automatically attaches the staging label AWSCURRENT to it. If this operation moves the staging label AWSCURRENT from another version to this version, then Secrets Manager also automatically moves the staging label AWSPREVIOUS to the version that AWSCURRENT was removed from.

This operation is idempotent. If you call this operation with a ClientRequestToken that matches an existing version's VersionId, and you specify the same secret data, the operation succeeds but does nothing. However, if the secret data is different, then the operation fails because you can't modify an existing version; you can only create new ones.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:PutSecretValue. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
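A hedged sketch of the idempotent call pattern described above (the secret name and value are placeholders):

    import json
    import uuid
    import boto3

    sm = boto3.client("secretsmanager")

    token = str(uuid.uuid4())  # reuse the same token if the call has to be retried

    sm.put_secret_value(
        SecretId="example/prod/db-credentials",  # placeholder
        ClientRequestToken=token,
        SecretString=json.dumps({"username": "admin", "password": "new-example-password"}),  # placeholder
        # Omitting VersionStages moves AWSCURRENT to this new version automatically.
    )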

" }, "RemoveRegionsFromReplication":{ "name":"RemoveRegionsFromReplication", @@ -229,7 +229,7 @@ {"shape":"InvalidParameterException"}, {"shape":"InternalServiceError"} ], - "documentation":"

For a secret that is replicated to other Regions, deletes the secret replicas from the Regions you specify.

Required permissions: secretsmanager:RemoveRegionsFromReplication. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

For a secret that is replicated to other Regions, deletes the secret replicas from the Regions you specify.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:RemoveRegionsFromReplication. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" }, "ReplicateSecretToRegions":{ "name":"ReplicateSecretToRegions", @@ -245,7 +245,7 @@ {"shape":"InvalidParameterException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Replicates the secret to a new Regions. See Multi-Region secrets.

Required permissions: secretsmanager:ReplicateSecretToRegions. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Replicates the secret to new Regions. See Multi-Region secrets.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:ReplicateSecretToRegions. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" }, "RestoreSecret":{ "name":"RestoreSecret", @@ -261,7 +261,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Cancels the scheduled deletion of a secret by removing the DeletedDate time stamp. You can access a secret again after it has been restored.

Required permissions: secretsmanager:RestoreSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Cancels the scheduled deletion of a secret by removing the DeletedDate time stamp. You can access a secret again after it has been restored.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:RestoreSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" }, "RotateSecret":{ "name":"RotateSecret", @@ -277,7 +277,7 @@ {"shape":"InternalServiceError"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Configures and starts the asynchronous process of rotating the secret. For more information about rotation, see Rotate secrets.

If you include the configuration parameters, the operation sets the values for the secret and then immediately starts a rotation. If you don't include the configuration parameters, the operation starts a rotation with the values already stored in the secret.

For database credentials you want to rotate, for Secrets Manager to be able to rotate the secret, you must make sure the secret value is in the JSON structure of a database secret. In particular, if you want to use the alternating users strategy, your secret must contain the ARN of a superuser secret.

To configure rotation, you also need the ARN of an Amazon Web Services Lambda function and the schedule for the rotation. The Lambda rotation function creates a new version of the secret and creates or updates the credentials on the database or service to match. After testing the new credentials, the function marks the new secret version with the staging label AWSCURRENT. Then anyone who retrieves the secret gets the new version. For more information, see How rotation works.

You can create the Lambda rotation function based on the rotation function templates that Secrets Manager provides. Choose a template that matches your Rotation strategy.

When rotation is successful, the AWSPENDING staging label might be attached to the same version as the AWSCURRENT version, or it might not be attached to any version. If the AWSPENDING staging label is present but not attached to the same version as AWSCURRENT, then any later invocation of RotateSecret assumes that a previous rotation request is still in progress and returns an error.

Required permissions: secretsmanager:RotateSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. You also need lambda:InvokeFunction permissions on the rotation function. For more information, see Permissions for rotation.

" + "documentation":"

Configures and starts the asynchronous process of rotating the secret. For more information about rotation, see Rotate secrets.

If you include the configuration parameters, the operation sets the values for the secret and then immediately starts a rotation. If you don't include the configuration parameters, the operation starts a rotation with the values already stored in the secret.

For database credentials you want to rotate, for Secrets Manager to be able to rotate the secret, you must make sure the secret value is in the JSON structure of a database secret. In particular, if you want to use the alternating users strategy, your secret must contain the ARN of a superuser secret.

To configure rotation, you also need the ARN of an Amazon Web Services Lambda function and the schedule for the rotation. The Lambda rotation function creates a new version of the secret and creates or updates the credentials on the database or service to match. After testing the new credentials, the function marks the new secret version with the staging label AWSCURRENT. Then anyone who retrieves the secret gets the new version. For more information, see How rotation works.

You can create the Lambda rotation function based on the rotation function templates that Secrets Manager provides. Choose a template that matches your Rotation strategy.

When rotation is successful, the AWSPENDING staging label might be attached to the same version as the AWSCURRENT version, or it might not be attached to any version. If the AWSPENDING staging label is present but not attached to the same version as AWSCURRENT, then any later invocation of RotateSecret assumes that a previous rotation request is still in progress and returns an error.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:RotateSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. You also need lambda:InvokeFunction permissions on the rotation function. For more information, see Permissions for rotation.
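A minimal sketch of configuring and starting rotation (the secret name and Lambda ARN are placeholders):

    import boto3

    sm = boto3.client("secretsmanager")

    sm.rotate_secret(
        SecretId="example/prod/db-credentials",  # placeholder
        RotationLambdaARN="arn:aws:lambda:us-east-1:111122223333:function:example-rotation",  # placeholder
        RotationRules={"AutomaticallyAfterDays": 30},
    )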

" }, "StopReplicationToReplica":{ "name":"StopReplicationToReplica", @@ -293,7 +293,7 @@ {"shape":"InvalidParameterException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Removes the link between the replica secret and the primary secret and promotes the replica to a primary secret in the replica Region.

You must call this operation from the Region in which you want to promote the replica to a primary secret.

Required permissions: secretsmanager:StopReplicationToReplica. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Removes the link between the replica secret and the primary secret and promotes the replica to a primary secret in the replica Region.

You must call this operation from the Region in which you want to promote the replica to a primary secret.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:StopReplicationToReplica. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" }, "TagResource":{ "name":"TagResource", @@ -308,7 +308,7 @@ {"shape":"InvalidParameterException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Attaches tags to a secret. Tags consist of a key name and a value. Tags are part of the secret's metadata. They are not associated with specific versions of the secret. This operation appends tags to the existing list of tags.

The following restrictions apply to tags:

If you use tags as part of your security strategy, then adding or removing a tag can change permissions. If successfully completing this operation would result in you losing your permissions for this secret, then the operation is blocked and returns an Access Denied error.

Required permissions: secretsmanager:TagResource. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Attaches tags to a secret. Tags consist of a key name and a value. Tags are part of the secret's metadata. They are not associated with specific versions of the secret. This operation appends tags to the existing list of tags.

The following restrictions apply to tags:

If you use tags as part of your security strategy, then adding or removing a tag can change permissions. If successfully completing this operation would result in you losing your permissions for this secret, then the operation is blocked and returns an Access Denied error.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:TagResource. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
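
A minimal boto3 sketch of attaching a tag, assuming a placeholder secret name and tag values:

    import boto3

    client = boto3.client("secretsmanager")
    # Tags are appended to the secret's existing tag list.
    client.tag_resource(
        SecretId="prod/app/db-credentials",  # placeholder
        Tags=[{"Key": "Environment", "Value": "Production"}],
    )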

" }, "UntagResource":{ "name":"UntagResource", @@ -323,7 +323,7 @@ {"shape":"InvalidParameterException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Removes specific tags from a secret.

This operation is idempotent. If a requested tag is not attached to the secret, no error is returned and the secret metadata is unchanged.

If you use tags as part of your security strategy, then removing a tag can change permissions. If successfully completing this operation would result in you losing your permissions for this secret, then the operation is blocked and returns an Access Denied error.

Required permissions: secretsmanager:UntagResource. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Removes specific tags from a secret.

This operation is idempotent. If a requested tag is not attached to the secret, no error is returned and the secret metadata is unchanged.

If you use tags as part of your security strategy, then removing a tag can change permissions. If successfully completing this operation would result in you losing your permissions for this secret, then the operation is blocked and returns an Access Denied error.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:UntagResource. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
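
A minimal boto3 sketch of removing a tag, assuming a placeholder secret name and tag key:

    import boto3

    client = boto3.client("secretsmanager")
    # Removing a tag that is not attached is a no-op, so this call is idempotent.
    client.untag_resource(
        SecretId="prod/app/db-credentials",  # placeholder
        TagKeys=["Environment"],
    )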

" }, "UpdateSecret":{ "name":"UpdateSecret", @@ -345,7 +345,7 @@ {"shape":"PreconditionNotMetException"}, {"shape":"DecryptionFailure"} ], - "documentation":"

Modifies the details of a secret, including metadata and the secret value. To change the secret value, you can also use PutSecretValue.

To change the rotation configuration of a secret, use RotateSecret instead.

We recommend you avoid calling UpdateSecret at a sustained rate of more than once every 10 minutes. When you call UpdateSecret to update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you update the secret value more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions.

If you include SecretString or SecretBinary to create a new secret version, Secrets Manager automatically attaches the staging label AWSCURRENT to the new version.

If you call this operation with a ClientRequestToken that matches an existing version's VersionId, the operation results in an error. You can't modify an existing version, you can only create a new version. To remove a version, remove all staging labels from it. See UpdateSecretVersionStage.

Required permissions: secretsmanager:UpdateSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. If you use a customer managed key, you must also have kms:GenerateDataKey and kms:Decrypt permissions on the key. For more information, see Secret encryption and decryption.

" + "documentation":"

Modifies the details of a secret, including metadata and the secret value. To change the secret value, you can also use PutSecretValue.

To change the rotation configuration of a secret, use RotateSecret instead.

We recommend you avoid calling UpdateSecret at a sustained rate of more than once every 10 minutes. When you call UpdateSecret to update the secret value, Secrets Manager creates a new version of the secret. Secrets Manager removes outdated versions when there are more than 100, but it does not remove versions created less than 24 hours ago. If you update the secret value more than once every 10 minutes, you create more versions than Secrets Manager removes, and you will reach the quota for secret versions.

If you include SecretString or SecretBinary to create a new secret version, Secrets Manager automatically moves the staging label AWSCURRENT to the new version. Then it attaches the label AWSPREVIOUS to the version that AWSCURRENT was removed from.

If you call this operation with a ClientRequestToken that matches an existing version's VersionId, the operation results in an error. You can't modify an existing version; you can only create a new version. To remove a version, remove all staging labels from it. See UpdateSecretVersionStage.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters except SecretBinary or SecretString because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:UpdateSecret. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager. If you use a customer managed key, you must also have kms:GenerateDataKey and kms:Decrypt permissions on the key. For more information, see Secret encryption and decryption.
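
For illustration, a minimal boto3 sketch of updating a secret's metadata and value; the secret name and secret string are placeholders:

    import boto3

    client = boto3.client("secretsmanager")
    # Supplying SecretString creates a new version and moves AWSCURRENT to it.
    response = client.update_secret(
        SecretId="prod/app/db-credentials",  # placeholder
        Description="Credentials for the orders database",
        SecretString='{"username": "app_user", "password": "example-only"}',
    )
    print(response["VersionId"])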

" }, "UpdateSecretVersionStage":{ "name":"UpdateSecretVersionStage", @@ -362,7 +362,7 @@ {"shape":"LimitExceededException"}, {"shape":"InternalServiceError"} ], - "documentation":"

Modifies the staging labels attached to a version of a secret. Secrets Manager uses staging labels to track a version as it progresses through the secret rotation process. Each staging label can be attached to only one version at a time. To add a staging label to a version when it is already attached to another version, Secrets Manager first removes it from the other version first and then attaches it to this one. For more information about versions and staging labels, see Concepts: Version.

The staging labels that you specify in the VersionStage parameter are added to the existing list of staging labels for the version.

You can move the AWSCURRENT staging label to this version by including it in this call.

Whenever you move AWSCURRENT, Secrets Manager automatically moves the label AWSPREVIOUS to the version that AWSCURRENT was removed from.

If this action results in the last label being removed from a version, then the version is considered to be 'deprecated' and can be deleted by Secrets Manager.

Required permissions: secretsmanager:UpdateSecretVersionStage. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Modifies the staging labels attached to a version of a secret. Secrets Manager uses staging labels to track a version as it progresses through the secret rotation process. Each staging label can be attached to only one version at a time. To add a staging label to a version when it is already attached to another version, Secrets Manager removes it from the other version and then attaches it to this one. For more information about versions and staging labels, see Concepts: Version.

The staging labels that you specify in the VersionStage parameter are added to the existing list of staging labels for the version.

You can move the AWSCURRENT staging label to this version by including it in this call.

Whenever you move AWSCURRENT, Secrets Manager automatically moves the label AWSPREVIOUS to the version that AWSCURRENT was removed from.

If this action results in the last label being removed from a version, then the version is considered to be 'deprecated' and can be deleted by Secrets Manager.

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:UpdateSecretVersionStage. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
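
For illustration, a minimal boto3 sketch of moving the AWSCURRENT label between versions; the secret name and version IDs are placeholders:

    import boto3

    client = boto3.client("secretsmanager")
    # Move AWSCURRENT from one version to another; AWSPREVIOUS follows automatically.
    client.update_secret_version_stage(
        SecretId="prod/app/db-credentials",                     # placeholder
        VersionStage="AWSCURRENT",
        MoveToVersionId="EXAMPLE-90ab-cdef-fedc-ba987EXAMPLE",  # placeholder version IDs
        RemoveFromVersionId="EXAMPLE-1234-5678-9012-34567EXAMPLE",
    )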

" }, "ValidateResourcePolicy":{ "name":"ValidateResourcePolicy", @@ -379,7 +379,7 @@ {"shape":"InternalServiceError"}, {"shape":"InvalidRequestException"} ], - "documentation":"

Validates that a resource policy does not grant a wide range of principals access to your secret. A resource-based policy is optional for secrets.

The API performs three checks when validating the policy:

Required permissions: secretsmanager:ValidateResourcePolicy. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.

" + "documentation":"

Validates that a resource policy does not grant a wide range of principals access to your secret. A resource-based policy is optional for secrets.

The API performs three checks when validating the policy:

Secrets Manager generates a CloudTrail log entry when you call this action. Do not include sensitive information in request parameters because it might be logged. For more information, see Logging Secrets Manager events with CloudTrail.

Required permissions: secretsmanager:ValidateResourcePolicy. For more information, see IAM policy actions for Secrets Manager and Authentication and access control in Secrets Manager.
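
For illustration, a minimal boto3 sketch of validating a resource policy; the principal ARN is a placeholder:

    import boto3
    import json

    client = boto3.client("secretsmanager")
    policy = {
        "Version": "2012-10-17",
        "Statement": [{
            "Effect": "Allow",
            "Principal": {"AWS": "arn:aws:iam::123456789012:role/app-role"},  # placeholder
            "Action": "secretsmanager:GetSecretValue",
            "Resource": "*",
        }],
    }
    response = client.validate_resource_policy(ResourcePolicy=json.dumps(policy))
    print(response["PolicyValidationPassed"], response.get("ValidationErrors", []))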

" } }, "shapes":{ diff --git a/botocore/data/translate/2017-07-01/service-2.json b/botocore/data/translate/2017-07-01/service-2.json index b5ad55986b..0f7630e6a5 100644 --- a/botocore/data/translate/2017-07-01/service-2.json +++ b/botocore/data/translate/2017-07-01/service-2.json @@ -26,7 +26,9 @@ {"shape":"InvalidRequestException"}, {"shape":"LimitExceededException"}, {"shape":"TooManyRequestsException"}, + {"shape":"TooManyTagsException"}, {"shape":"ConflictException"}, + {"shape":"ConcurrentModificationException"}, {"shape":"InternalServerException"} ], "documentation":"

Creates a parallel data resource in Amazon Translate by importing an input file from Amazon S3. Parallel data files contain examples that show how you want segments of text to be translated. By adding parallel data, you can influence the style, tone, and word choice in your translation output.
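
For illustration, a minimal boto3 sketch of creating a parallel data resource with tags; the resource name, S3 URI, and tag values are placeholders:

    import boto3

    translate = boto3.client("translate")
    translate.create_parallel_data(
        Name="marketing-examples",  # placeholder
        ParallelDataConfig={
            "S3Uri": "s3://example-bucket/parallel-data.csv",  # placeholder
            "Format": "CSV",
        },
        Tags=[{"Key": "Team", "Value": "Localization"}],
    )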

" @@ -121,6 +123,8 @@ {"shape":"InvalidParameterValueException"}, {"shape":"LimitExceededException"}, {"shape":"TooManyRequestsException"}, + {"shape":"TooManyTagsException"}, + {"shape":"ConcurrentModificationException"}, {"shape":"InternalServerException"} ], "documentation":"

Creates or updates a custom terminology, depending on whether one already exists for the given terminology name. Importing a terminology with the same name as an existing one will merge the terminologies based on the chosen merge strategy. The only supported merge strategy is OVERWRITE, where the imported terminology overwrites the existing terminology of the same name.

If you import a terminology that overwrites an existing one, the new terminology takes up to 10 minutes to fully propagate. After that, translations have access to the new terminology.
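
For illustration, a minimal boto3 sketch of importing a terminology with tags; the name, local file, and tag values are placeholders:

    import boto3

    translate = boto3.client("translate")
    with open("terminology.csv", "rb") as f:  # placeholder local file
        translate.import_terminology(
            Name="brand-terms",  # placeholder
            MergeStrategy="OVERWRITE",
            TerminologyData={"File": f.read(), "Format": "CSV"},
            Tags=[{"Key": "Team", "Value": "Localization"}],
        )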

" @@ -156,6 +160,20 @@ ], "documentation":"

Provides a list of your parallel data resources in Amazon Translate.

" }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ] + }, "ListTerminologies":{ "name":"ListTerminologies", "http":{ @@ -220,6 +238,22 @@ ], "documentation":"

Stops an asynchronous batch translation job that is in progress.

If the job's state is IN_PROGRESS, the job will be marked for termination and put into the STOP_REQUESTED state. If the job completes before it can be stopped, it is put into the COMPLETED state. Otherwise, the job is put into the STOPPED state.

Asynchronous batch translation jobs are started with the StartTextTranslationJob operation. You can use the DescribeTextTranslationJob or ListTextTranslationJobs operations to get a batch translation job's JobId.
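
For illustration, a minimal boto3 sketch of stopping a batch job; the job ID is a placeholder:

    import boto3

    translate = boto3.client("translate")
    # JobId comes from StartTextTranslationJob or ListTextTranslationJobs.
    response = translate.stop_text_translation_job(JobId="1234567890abcdef0")  # placeholder
    print(response["JobStatus"])  # e.g. STOP_REQUESTED, COMPLETED, or STOPPED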

" }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"TooManyTagsException"}, + {"shape":"InternalServerException"} + ] + }, "TranslateText":{ "name":"TranslateText", "http":{ @@ -240,6 +274,21 @@ ], "documentation":"

Translates input text from the source language to the target language. For a list of available languages and language codes, see what-is-languages.
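
For illustration, a minimal boto3 sketch of a synchronous text translation; the text and language codes are example values:

    import boto3

    translate = boto3.client("translate")
    response = translate.translate_text(
        Text="Hello, world",
        SourceLanguageCode="en",
        TargetLanguageCode="es",
    )
    print(response["TranslatedText"])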

" }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ] + }, "UpdateParallelData":{ "name":"UpdateParallelData", "http":{ @@ -338,7 +387,8 @@ "shape":"ClientTokenString", "documentation":"

A unique identifier for the request. This token is automatically generated when you use Amazon Translate through an AWS SDK.

", "idempotencyToken":true - } + }, + "Tags":{"shape":"TagList"} } }, "CreateParallelDataResponse":{ @@ -575,7 +625,8 @@ "EncryptionKey":{ "shape":"EncryptionKey", "documentation":"

The encryption key for the custom terminology being imported.

" - } + }, + "Tags":{"shape":"TagList"} } }, "ImportTerminologyResponse":{ @@ -784,6 +835,19 @@ } } }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{"shape":"ResourceArn"} + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{"shape":"TagList"} + } + }, "ListTerminologiesRequest":{ "type":"structure", "members":{ @@ -1010,6 +1074,11 @@ "type":"string", "enum":["MASK"] }, + "ResourceArn":{ + "type":"string", + "max":512, + "min":1 + }, "ResourceName":{ "type":"string", "max":256, @@ -1137,10 +1206,58 @@ "max":10000, "pattern":"[\\P{M}\\p{M}]{0,10000}" }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{"shape":"TagKey"}, + "Value":{"shape":"TagValue"} + } + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1 + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{"shape":"ResourceArn"}, + "Tags":{"shape":"TagList"} + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0 + }, "TargetLanguageCodeStringList":{ "type":"list", "member":{"shape":"LanguageCodeString"}, - "max":1, "min":1 }, "Term":{ @@ -1394,6 +1511,14 @@ "documentation":"

You have made too many requests within a short period of time. Wait for a short time and then try your request again.

", "exception":true }, + "TooManyTagsException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"}, + "ResourceArn":{"shape":"ResourceArn"} + }, + "exception":true + }, "TranslateTextRequest":{ "type":"structure", "required":[ @@ -1497,6 +1622,22 @@ "documentation":"

Amazon Translate does not support translation from the language of the source text into the requested target language. For more information, see how-to-error-msg.

", "exception":true }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{"shape":"ResourceArn"}, + "TagKeys":{"shape":"TagKeyList"} + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateParallelDataRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/workspaces/2015-04-08/service-2.json b/botocore/data/workspaces/2015-04-08/service-2.json index 07b41ae6c9..e2f0245ceb 100644 --- a/botocore/data/workspaces/2015-04-08/service-2.json +++ b/botocore/data/workspaces/2015-04-08/service-2.json @@ -1211,6 +1211,10 @@ "ReconnectEnabled":{ "shape":"ReconnectEnum", "documentation":"

Specifies whether users can cache their credentials on the Amazon WorkSpaces client. When enabled, users can choose to reconnect to their WorkSpaces without re-entering their credentials.

" + }, + "LogUploadEnabled":{ + "shape":"LogUploadEnum", + "documentation":"

Specifies whether users can upload diagnostic log files of the Amazon WorkSpaces client directly to WorkSpaces to troubleshoot issues when using the WorkSpaces client. When enabled, the log files are sent to WorkSpaces automatically, and the setting applies to all users in the specified directory.

" } }, "documentation":"

Describes an Amazon WorkSpaces client.
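
For illustration, a boto3 sketch of setting both client properties on a directory; the directory ID is a placeholder, and LogUploadEnabled is the newly added property:

    import boto3

    workspaces = boto3.client("workspaces")
    workspaces.modify_client_properties(
        ResourceId="d-1234567890",  # placeholder directory ID
        ClientProperties={
            "ReconnectEnabled": "ENABLED",
            "LogUploadEnabled": "ENABLED",  # new property in this model update
        },
    )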

" @@ -1691,7 +1695,7 @@ }, "OwnerAccountId":{ "shape":"AwsAccount", - "documentation":"

The identifier of the AWS account that owns the image.

" + "documentation":"

The identifier of the Amazon Web Services account that owns the image.

" } } }, @@ -2878,6 +2882,13 @@ } } }, + "LogUploadEnum":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "LoginMessage":{ "type":"map", "key":{"shape":"ClientLocale"}, From 1d0f45b3e965eadd1dd872d06facdc42d5c8509e Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Thu, 29 Sep 2022 18:09:39 +0000 Subject: [PATCH 3/4] Update to latest endpoints --- botocore/data/endpoints.json | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index a737e8e29c..e40ae5328c 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -9148,6 +9148,17 @@ "us-west-2" : { } } }, + "migrationhub-orchestrator" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, "migrationhub-strategy" : { "endpoints" : { "ap-northeast-1" : { }, @@ -20814,6 +20825,11 @@ "us-isob-east-1" : { } } }, + "resource-groups" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "route53" : { "endpoints" : { "aws-iso-b-global" : { From 934de9203cc84f222c6583e4254699c5750cd762 Mon Sep 17 00:00:00 2001 From: aws-sdk-python-automation Date: Thu, 29 Sep 2022 18:09:42 +0000 Subject: [PATCH 4/4] Bumping version to 1.27.83 --- .changes/1.27.83.json | 57 +++++++++++++++++++ .../next-release/api-change-acm-11031.json | 5 -- .../next-release/api-change-ec2-28737.json | 5 -- .../api-change-emrserverless-31104.json | 5 -- .../next-release/api-change-fsx-23319.json | 5 -- ...change-migrationhuborchestrator-50518.json | 5 -- .../next-release/api-change-polly-705.json | 5 -- .../next-release/api-change-proton-80007.json | 5 -- .../api-change-sagemaker-88536.json | 5 -- .../api-change-secretsmanager-71738.json | 5 -- .../api-change-translate-83017.json | 5 -- .../api-change-workspaces-80407.json | 5 -- CHANGELOG.rst | 16 ++++++ botocore/__init__.py | 2 +- docs/source/conf.py | 2 +- 15 files changed, 75 insertions(+), 57 deletions(-) create mode 100644 .changes/1.27.83.json delete mode 100644 .changes/next-release/api-change-acm-11031.json delete mode 100644 .changes/next-release/api-change-ec2-28737.json delete mode 100644 .changes/next-release/api-change-emrserverless-31104.json delete mode 100644 .changes/next-release/api-change-fsx-23319.json delete mode 100644 .changes/next-release/api-change-migrationhuborchestrator-50518.json delete mode 100644 .changes/next-release/api-change-polly-705.json delete mode 100644 .changes/next-release/api-change-proton-80007.json delete mode 100644 .changes/next-release/api-change-sagemaker-88536.json delete mode 100644 .changes/next-release/api-change-secretsmanager-71738.json delete mode 100644 .changes/next-release/api-change-translate-83017.json delete mode 100644 .changes/next-release/api-change-workspaces-80407.json diff --git a/.changes/1.27.83.json b/.changes/1.27.83.json new file mode 100644 index 0000000000..35e4803526 --- /dev/null +++ b/.changes/1.27.83.json @@ -0,0 +1,57 @@ +[ + { + "category": "``acm``", + "description": "This update returns additional certificate details such as certificate SANs and allows sorting in the ListCertificates API.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "u-3tb1 instances are powered by Intel Xeon Platinum 8176M (Skylake) processors and are purpose-built to run large in-memory databases.", + "type": "api-change" + }, + { + "category": "``emr-serverless``", + "description": "This 
release adds API support to debug Amazon EMR Serverless jobs in real-time with live application UIs", + "type": "api-change" + }, + { + "category": "``fsx``", + "description": "This release adds support for Amazon File Cache.", + "type": "api-change" + }, + { + "category": "``migrationhuborchestrator``", + "description": "Introducing AWS MigrationHubOrchestrator. This is the first public release of AWS MigrationHubOrchestrator.", + "type": "api-change" + }, + { + "category": "``polly``", + "description": "Added support for the new Cantonese voice - Hiujin. Hiujin is available as a Neural voice only.", + "type": "api-change" + }, + { + "category": "``proton``", + "description": "This release adds an option to delete pipeline provisioning repositories using the UpdateAccountSettings API", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "SageMaker Training Managed Warm Pools let you retain provisioned infrastructure to reduce latency for repetitive training workloads.", + "type": "api-change" + }, + { + "category": "``secretsmanager``", + "description": "Documentation updates for Secrets Manager", + "type": "api-change" + }, + { + "category": "``translate``", + "description": "This release enables customers to access control rights on Translate resources like Parallel Data and Custom Terminology using Tag Based Authorization.", + "type": "api-change" + }, + { + "category": "``workspaces``", + "description": "This release includes diagnostic log uploading feature. If it is enabled, the log files of WorkSpaces Windows client will be sent to Amazon WorkSpaces automatically for troubleshooting. You can use modifyClientProperty api to enable/disable this feature.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/next-release/api-change-acm-11031.json b/.changes/next-release/api-change-acm-11031.json deleted file mode 100644 index 29206055c5..0000000000 --- a/.changes/next-release/api-change-acm-11031.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``acm``", - "description": "This update returns additional certificate details such as certificate SANs and allows sorting in the ListCertificates API." -} diff --git a/.changes/next-release/api-change-ec2-28737.json b/.changes/next-release/api-change-ec2-28737.json deleted file mode 100644 index 47eb1b7af5..0000000000 --- a/.changes/next-release/api-change-ec2-28737.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``ec2``", - "description": "u-3tb1 instances are powered by Intel Xeon Platinum 8176M (Skylake) processors and are purpose-built to run large in-memory databases." -} diff --git a/.changes/next-release/api-change-emrserverless-31104.json b/.changes/next-release/api-change-emrserverless-31104.json deleted file mode 100644 index 05b6d456f9..0000000000 --- a/.changes/next-release/api-change-emrserverless-31104.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``emr-serverless``", - "description": "This release adds API support to debug Amazon EMR Serverless jobs in real-time with live application UIs" -} diff --git a/.changes/next-release/api-change-fsx-23319.json b/.changes/next-release/api-change-fsx-23319.json deleted file mode 100644 index 73938698c6..0000000000 --- a/.changes/next-release/api-change-fsx-23319.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``fsx``", - "description": "This release adds support for Amazon File Cache." 
-} diff --git a/.changes/next-release/api-change-migrationhuborchestrator-50518.json b/.changes/next-release/api-change-migrationhuborchestrator-50518.json deleted file mode 100644 index b136e29e9d..0000000000 --- a/.changes/next-release/api-change-migrationhuborchestrator-50518.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``migrationhuborchestrator``", - "description": "Introducing AWS MigrationHubOrchestrator. This is the first public release of AWS MigrationHubOrchestrator." -} diff --git a/.changes/next-release/api-change-polly-705.json b/.changes/next-release/api-change-polly-705.json deleted file mode 100644 index 5089e12e94..0000000000 --- a/.changes/next-release/api-change-polly-705.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``polly``", - "description": "Added support for the new Cantonese voice - Hiujin. Hiujin is available as a Neural voice only." -} diff --git a/.changes/next-release/api-change-proton-80007.json b/.changes/next-release/api-change-proton-80007.json deleted file mode 100644 index f477804297..0000000000 --- a/.changes/next-release/api-change-proton-80007.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``proton``", - "description": "This release adds an option to delete pipeline provisioning repositories using the UpdateAccountSettings API" -} diff --git a/.changes/next-release/api-change-sagemaker-88536.json b/.changes/next-release/api-change-sagemaker-88536.json deleted file mode 100644 index 093a8819c3..0000000000 --- a/.changes/next-release/api-change-sagemaker-88536.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``sagemaker``", - "description": "SageMaker Training Managed Warm Pools let you retain provisioned infrastructure to reduce latency for repetitive training workloads." -} diff --git a/.changes/next-release/api-change-secretsmanager-71738.json b/.changes/next-release/api-change-secretsmanager-71738.json deleted file mode 100644 index 4cfcfab432..0000000000 --- a/.changes/next-release/api-change-secretsmanager-71738.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``secretsmanager``", - "description": "Documentation updates for Secrets Manager" -} diff --git a/.changes/next-release/api-change-translate-83017.json b/.changes/next-release/api-change-translate-83017.json deleted file mode 100644 index 3dac689271..0000000000 --- a/.changes/next-release/api-change-translate-83017.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``translate``", - "description": "This release enables customers to access control rights on Translate resources like Parallel Data and Custom Terminology using Tag Based Authorization." -} diff --git a/.changes/next-release/api-change-workspaces-80407.json b/.changes/next-release/api-change-workspaces-80407.json deleted file mode 100644 index e8c7d41499..0000000000 --- a/.changes/next-release/api-change-workspaces-80407.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "type": "api-change", - "category": "``workspaces``", - "description": "This release includes diagnostic log uploading feature. If it is enabled, the log files of WorkSpaces Windows client will be sent to Amazon WorkSpaces automatically for troubleshooting. You can use modifyClientProperty api to enable/disable this feature." 
-} diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 46d09cd955..96d333d59b 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,22 @@ CHANGELOG ========= +1.27.83 +======= + +* api-change:``acm``: This update returns additional certificate details such as certificate SANs and allows sorting in the ListCertificates API. +* api-change:``ec2``: u-3tb1 instances are powered by Intel Xeon Platinum 8176M (Skylake) processors and are purpose-built to run large in-memory databases. +* api-change:``emr-serverless``: This release adds API support to debug Amazon EMR Serverless jobs in real-time with live application UIs +* api-change:``fsx``: This release adds support for Amazon File Cache. +* api-change:``migrationhuborchestrator``: Introducing AWS MigrationHubOrchestrator. This is the first public release of AWS MigrationHubOrchestrator. +* api-change:``polly``: Added support for the new Cantonese voice - Hiujin. Hiujin is available as a Neural voice only. +* api-change:``proton``: This release adds an option to delete pipeline provisioning repositories using the UpdateAccountSettings API +* api-change:``sagemaker``: SageMaker Training Managed Warm Pools let you retain provisioned infrastructure to reduce latency for repetitive training workloads. +* api-change:``secretsmanager``: Documentation updates for Secrets Manager +* api-change:``translate``: This release enables customers to access control rights on Translate resources like Parallel Data and Custom Terminology using Tag Based Authorization. +* api-change:``workspaces``: This release includes diagnostic log uploading feature. If it is enabled, the log files of WorkSpaces Windows client will be sent to Amazon WorkSpaces automatically for troubleshooting. You can use modifyClientProperty api to enable/disable this feature. + + 1.27.82 ======= diff --git a/botocore/__init__.py b/botocore/__init__.py index f38827f3dd..30975583e7 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re -__version__ = '1.27.82' +__version__ = '1.27.83' class NullHandler(logging.Handler): diff --git a/docs/source/conf.py b/docs/source/conf.py index fad8548127..96b6671be9 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -54,7 +54,7 @@ # The short X.Y version. version = '1.27.' # The full version, including alpha/beta/rc tags. -release = '1.27.82' +release = '1.27.83' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages.